1 /* $NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $ */ 2 3 4 /* 5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by the University of 18 * California, Berkeley and its contributors. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 */ 35 36 37 /* 38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 3. All advertising materials mentioning features or use of this software 49 * must display the following acknowledgement: 50 * This product includes software developed by Christopher G. Demetriou 51 * for the NetBSD Project. 52 * 4. The name of the author may not be used to endorse or promote products 53 * derived from this software without specific prior written permission 54 * 55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 */ 66 67 /* 68 * PCI IDE controller driver. 69 * 70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 71 * sys/dev/pci/ppb.c, revision 1.16). 72 * 73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 75 * 5/16/94" from the PCI SIG. 76 * 77 */ 78 79 #ifndef WDCDEBUG 80 #define WDCDEBUG 81 #endif 82 83 #define DEBUG_DMA 0x01 84 #define DEBUG_XFERS 0x02 85 #define DEBUG_FUNCS 0x08 86 #define DEBUG_PROBE 0x10 87 #ifdef WDCDEBUG 88 int wdcdebug_pciide_mask = 0; 89 #define WDCDEBUG_PRINT(args, level) \ 90 if (wdcdebug_pciide_mask & (level)) printf args 91 #else 92 #define WDCDEBUG_PRINT(args, level) 93 #endif 94 #include <sys/param.h> 95 #include <sys/systm.h> 96 #include <sys/device.h> 97 #include <sys/malloc.h> 98 99 #include <uvm/uvm_extern.h> 100 101 #include <machine/endian.h> 102 103 #include <dev/pci/pcireg.h> 104 #include <dev/pci/pcivar.h> 105 #include <dev/pci/pcidevs.h> 106 #include <dev/pci/pciidereg.h> 107 #include <dev/pci/pciidevar.h> 108 #include <dev/pci/pciide_piix_reg.h> 109 #include <dev/pci/pciide_amd_reg.h> 110 #include <dev/pci/pciide_apollo_reg.h> 111 #include <dev/pci/pciide_cmd_reg.h> 112 #include <dev/pci/pciide_cy693_reg.h> 113 #include <dev/pci/pciide_sis_reg.h> 114 #include <dev/pci/pciide_acer_reg.h> 115 #include <dev/pci/pciide_pdc202xx_reg.h> 116 #include <dev/pci/pciide_opti_reg.h> 117 #include <dev/pci/pciide_hpt_reg.h> 118 #include <dev/pci/pciide_acard_reg.h> 119 #include <dev/pci/cy82c693var.h> 120 121 #include "opt_pciide.h" 122 123 /* inlines for reading/writing 8-bit PCI registers */ 124 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t, 125 int)); 126 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t, 127 int, u_int8_t)); 128 129 static __inline u_int8_t 130 pciide_pci_read(pc, pa, reg) 131 pci_chipset_tag_t pc; 132 pcitag_t pa; 133 int reg; 134 { 135 136 return (pci_conf_read(pc, pa, (reg & ~0x03)) >> 137 ((reg & 0x03) * 8) & 0xff); 138 } 139 140 static __inline void 141 pciide_pci_write(pc, pa, reg, val) 142 pci_chipset_tag_t pc; 143 pcitag_t pa; 144 int reg; 145 u_int8_t val; 146 { 147 pcireg_t pcival; 148 149 pcival = pci_conf_read(pc, pa, (reg & ~0x03)); 150 pcival &= ~(0xff << ((reg & 0x03) * 8)); 151 pcival |= (val << ((reg & 0x03) * 8)); 152 pci_conf_write(pc, pa, (reg & ~0x03), pcival); 153 } 154 155 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 156 157 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 158 void piix_setup_channel __P((struct channel_softc*)); 159 void piix3_4_setup_channel __P((struct channel_softc*)); 160 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t)); 161 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*)); 162 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t)); 163 164 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 165 void amd7x6_setup_channel __P((struct channel_softc*)); 166 167 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 168 void apollo_setup_channel __P((struct channel_softc*)); 169 170 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 171 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 172 void cmd0643_9_setup_channel __P((struct channel_softc*)); 173 void cmd_channel_map __P((struct pci_attach_args *, 174 struct pciide_softc *, int)); 175 int cmd_pci_intr __P((void *)); 176 void cmd646_9_irqack __P((struct channel_softc *)); 177 178 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 179 void cy693_setup_channel __P((struct channel_softc*)); 180 181 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 182 void sis_setup_channel __P((struct channel_softc*)); 183 184 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 185 void acer_setup_channel __P((struct channel_softc*)); 186 int acer_pci_intr __P((void *)); 187 188 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 189 void pdc202xx_setup_channel __P((struct channel_softc*)); 190 int pdc202xx_pci_intr __P((void *)); 191 int pdc20265_pci_intr __P((void *)); 192 193 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 194 void opti_setup_channel __P((struct channel_softc*)); 195 196 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 197 void hpt_setup_channel __P((struct channel_softc*)); 198 int hpt_pci_intr __P((void *)); 199 200 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 201 void acard_setup_channel __P((struct channel_softc*)); 202 int acard_pci_intr __P((void *)); 203 204 #ifdef PCIIDE_WINBOND_ENABLE 205 void winbond_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 206 #endif 207 208 void pciide_channel_dma_setup __P((struct pciide_channel *)); 209 int pciide_dma_table_setup __P((struct pciide_softc*, int, int)); 210 int pciide_dma_init __P((void*, int, int, void *, size_t, int)); 211 void pciide_dma_start __P((void*, int, int)); 212 int pciide_dma_finish __P((void*, int, int, int)); 213 void pciide_irqack __P((struct channel_softc *)); 214 void pciide_print_modes __P((struct pciide_channel *)); 215 216 struct pciide_product_desc { 217 u_int32_t ide_product; 218 int ide_flags; 219 const char *ide_name; 220 /* map and setup chip, probe drives */ 221 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*)); 222 }; 223 224 /* Flags for ide_flags */ 225 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 226 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 227 228 /* Default product description for devices not known from this controller */ 229 const struct pciide_product_desc default_product_desc = { 230 0, 231 0, 232 "Generic PCI IDE controller", 233 default_chip_map, 234 }; 235 236 const struct pciide_product_desc pciide_intel_products[] = { 237 { PCI_PRODUCT_INTEL_82092AA, 238 0, 239 "Intel 82092AA IDE controller", 240 default_chip_map, 241 }, 242 { PCI_PRODUCT_INTEL_82371FB_IDE, 243 0, 244 "Intel 82371FB IDE controller (PIIX)", 245 piix_chip_map, 246 }, 247 { PCI_PRODUCT_INTEL_82371SB_IDE, 248 0, 249 "Intel 82371SB IDE Interface (PIIX3)", 250 piix_chip_map, 251 }, 252 { PCI_PRODUCT_INTEL_82371AB_IDE, 253 0, 254 "Intel 82371AB IDE controller (PIIX4)", 255 piix_chip_map, 256 }, 257 { PCI_PRODUCT_INTEL_82440MX_IDE, 258 0, 259 "Intel 82440MX IDE controller", 260 piix_chip_map 261 }, 262 { PCI_PRODUCT_INTEL_82801AA_IDE, 263 0, 264 "Intel 82801AA IDE Controller (ICH)", 265 piix_chip_map, 266 }, 267 { PCI_PRODUCT_INTEL_82801AB_IDE, 268 0, 269 "Intel 82801AB IDE Controller (ICH0)", 270 piix_chip_map, 271 }, 272 { PCI_PRODUCT_INTEL_82801BA_IDE, 273 0, 274 "Intel 82801BA IDE Controller (ICH2)", 275 piix_chip_map, 276 }, 277 { PCI_PRODUCT_INTEL_82801BAM_IDE, 278 0, 279 "Intel 82801BAM IDE Controller (ICH2)", 280 piix_chip_map, 281 }, 282 { 0, 283 0, 284 NULL, 285 NULL 286 } 287 }; 288 289 const struct pciide_product_desc pciide_amd_products[] = { 290 { PCI_PRODUCT_AMD_PBC756_IDE, 291 0, 292 "Advanced Micro Devices AMD756 IDE Controller", 293 amd7x6_chip_map 294 }, 295 { PCI_PRODUCT_AMD_PBC766_IDE, 296 0, 297 "Advanced Micro Devices AMD766 IDE Controller", 298 amd7x6_chip_map 299 }, 300 { 0, 301 0, 302 NULL, 303 NULL 304 } 305 }; 306 307 const struct pciide_product_desc pciide_cmd_products[] = { 308 { PCI_PRODUCT_CMDTECH_640, 309 0, 310 "CMD Technology PCI0640", 311 cmd_chip_map 312 }, 313 { PCI_PRODUCT_CMDTECH_643, 314 0, 315 "CMD Technology PCI0643", 316 cmd0643_9_chip_map, 317 }, 318 { PCI_PRODUCT_CMDTECH_646, 319 0, 320 "CMD Technology PCI0646", 321 cmd0643_9_chip_map, 322 }, 323 { PCI_PRODUCT_CMDTECH_648, 324 IDE_PCI_CLASS_OVERRIDE, 325 "CMD Technology PCI0648", 326 cmd0643_9_chip_map, 327 }, 328 { PCI_PRODUCT_CMDTECH_649, 329 IDE_PCI_CLASS_OVERRIDE, 330 "CMD Technology PCI0649", 331 cmd0643_9_chip_map, 332 }, 333 { 0, 334 0, 335 NULL, 336 NULL 337 } 338 }; 339 340 const struct pciide_product_desc pciide_via_products[] = { 341 { PCI_PRODUCT_VIATECH_VT82C586_IDE, 342 0, 343 NULL, 344 apollo_chip_map, 345 }, 346 { PCI_PRODUCT_VIATECH_VT82C586A_IDE, 347 0, 348 NULL, 349 apollo_chip_map, 350 }, 351 { 0, 352 0, 353 NULL, 354 NULL 355 } 356 }; 357 358 const struct pciide_product_desc pciide_cypress_products[] = { 359 { PCI_PRODUCT_CONTAQ_82C693, 360 IDE_16BIT_IOSPACE, 361 "Cypress 82C693 IDE Controller", 362 cy693_chip_map, 363 }, 364 { 0, 365 0, 366 NULL, 367 NULL 368 } 369 }; 370 371 const struct pciide_product_desc pciide_sis_products[] = { 372 { PCI_PRODUCT_SIS_5597_IDE, 373 0, 374 "Silicon Integrated System 5597/5598 IDE controller", 375 sis_chip_map, 376 }, 377 { 0, 378 0, 379 NULL, 380 NULL 381 } 382 }; 383 384 const struct pciide_product_desc pciide_acer_products[] = { 385 { PCI_PRODUCT_ALI_M5229, 386 0, 387 "Acer Labs M5229 UDMA IDE Controller", 388 acer_chip_map, 389 }, 390 { 0, 391 0, 392 NULL, 393 NULL 394 } 395 }; 396 397 const struct pciide_product_desc pciide_promise_products[] = { 398 { PCI_PRODUCT_PROMISE_ULTRA33, 399 IDE_PCI_CLASS_OVERRIDE, 400 "Promise Ultra33/ATA Bus Master IDE Accelerator", 401 pdc202xx_chip_map, 402 }, 403 { PCI_PRODUCT_PROMISE_ULTRA66, 404 IDE_PCI_CLASS_OVERRIDE, 405 "Promise Ultra66/ATA Bus Master IDE Accelerator", 406 pdc202xx_chip_map, 407 }, 408 { PCI_PRODUCT_PROMISE_ULTRA100, 409 IDE_PCI_CLASS_OVERRIDE, 410 "Promise Ultra100/ATA Bus Master IDE Accelerator", 411 pdc202xx_chip_map, 412 }, 413 { PCI_PRODUCT_PROMISE_ULTRA100X, 414 IDE_PCI_CLASS_OVERRIDE, 415 "Promise Ultra100/ATA Bus Master IDE Accelerator", 416 pdc202xx_chip_map, 417 }, 418 { 0, 419 0, 420 NULL, 421 NULL 422 } 423 }; 424 425 const struct pciide_product_desc pciide_opti_products[] = { 426 { PCI_PRODUCT_OPTI_82C621, 427 0, 428 "OPTi 82c621 PCI IDE controller", 429 opti_chip_map, 430 }, 431 { PCI_PRODUCT_OPTI_82C568, 432 0, 433 "OPTi 82c568 (82c621 compatible) PCI IDE controller", 434 opti_chip_map, 435 }, 436 { PCI_PRODUCT_OPTI_82D568, 437 0, 438 "OPTi 82d568 (82c621 compatible) PCI IDE controller", 439 opti_chip_map, 440 }, 441 { 0, 442 0, 443 NULL, 444 NULL 445 } 446 }; 447 448 const struct pciide_product_desc pciide_triones_products[] = { 449 { PCI_PRODUCT_TRIONES_HPT366, 450 IDE_PCI_CLASS_OVERRIDE, 451 NULL, 452 hpt_chip_map, 453 }, 454 { 0, 455 0, 456 NULL, 457 NULL 458 } 459 }; 460 461 const struct pciide_product_desc pciide_acard_products[] = { 462 { PCI_PRODUCT_ACARD_ATP850U, 463 IDE_PCI_CLASS_OVERRIDE, 464 "Acard ATP850U Ultra33 IDE Controller", 465 acard_chip_map, 466 }, 467 { PCI_PRODUCT_ACARD_ATP860, 468 IDE_PCI_CLASS_OVERRIDE, 469 "Acard ATP860 Ultra66 IDE Controller", 470 acard_chip_map, 471 }, 472 { PCI_PRODUCT_ACARD_ATP860A, 473 IDE_PCI_CLASS_OVERRIDE, 474 "Acard ATP860-A Ultra66 IDE Controller", 475 acard_chip_map, 476 }, 477 { 0, 478 0, 479 NULL, 480 NULL 481 } 482 }; 483 484 #ifdef PCIIDE_SERVERWORKS_ENABLE 485 const struct pciide_product_desc pciide_serverworks_products[] = { 486 { PCI_PRODUCT_SERVERWORKS_IDE, 487 0, 488 "ServerWorks ROSB4 IDE Controller", 489 piix_chip_map, 490 }, 491 { 0, 492 0, 493 NULL, 494 } 495 }; 496 #endif 497 498 #ifdef PCIIDE_WINBOND_ENABLE 499 const struct pciide_product_desc pciide_winbond_products[] = { 500 { PCI_PRODUCT_WINBOND_W83C553F_1, 501 0, 502 "Winbond W83C553F IDE controller", 503 winbond_chip_map, 504 }, 505 { 0, 506 0, 507 NULL, 508 } 509 }; 510 #endif 511 512 struct pciide_vendor_desc { 513 u_int32_t ide_vendor; 514 const struct pciide_product_desc *ide_products; 515 }; 516 517 const struct pciide_vendor_desc pciide_vendors[] = { 518 { PCI_VENDOR_INTEL, pciide_intel_products }, 519 { PCI_VENDOR_CMDTECH, pciide_cmd_products }, 520 { PCI_VENDOR_VIATECH, pciide_via_products }, 521 { PCI_VENDOR_CONTAQ, pciide_cypress_products }, 522 { PCI_VENDOR_SIS, pciide_sis_products }, 523 { PCI_VENDOR_ALI, pciide_acer_products }, 524 { PCI_VENDOR_PROMISE, pciide_promise_products }, 525 { PCI_VENDOR_AMD, pciide_amd_products }, 526 { PCI_VENDOR_OPTI, pciide_opti_products }, 527 { PCI_VENDOR_TRIONES, pciide_triones_products }, 528 { PCI_VENDOR_ACARD, pciide_acard_products }, 529 #ifdef PCIIDE_SERVERWORKS_ENABLE 530 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products }, 531 #endif 532 #ifdef PCIIDE_WINBOND_ENABLE 533 { PCI_VENDOR_WINBOND, pciide_winbond_products }, 534 #endif 535 { 0, NULL } 536 }; 537 538 /* options passed via the 'flags' config keyword */ 539 #define PCIIDE_OPTIONS_DMA 0x01 540 541 int pciide_match __P((struct device *, struct cfdata *, void *)); 542 void pciide_attach __P((struct device *, struct device *, void *)); 543 544 struct cfattach pciide_ca = { 545 sizeof(struct pciide_softc), pciide_match, pciide_attach 546 }; 547 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *)); 548 int pciide_mapregs_compat __P(( struct pci_attach_args *, 549 struct pciide_channel *, int, bus_size_t *, bus_size_t*)); 550 int pciide_mapregs_native __P((struct pci_attach_args *, 551 struct pciide_channel *, bus_size_t *, bus_size_t *, 552 int (*pci_intr) __P((void *)))); 553 void pciide_mapreg_dma __P((struct pciide_softc *, 554 struct pci_attach_args *)); 555 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t)); 556 void pciide_mapchan __P((struct pci_attach_args *, 557 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *, 558 int (*pci_intr) __P((void *)))); 559 int pciide_chan_candisable __P((struct pciide_channel *)); 560 void pciide_map_compat_intr __P(( struct pci_attach_args *, 561 struct pciide_channel *, int, int)); 562 int pciide_compat_intr __P((void *)); 563 int pciide_pci_intr __P((void *)); 564 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t)); 565 566 const struct pciide_product_desc * 567 pciide_lookup_product(id) 568 u_int32_t id; 569 { 570 const struct pciide_product_desc *pp; 571 const struct pciide_vendor_desc *vp; 572 573 for (vp = pciide_vendors; vp->ide_products != NULL; vp++) 574 if (PCI_VENDOR(id) == vp->ide_vendor) 575 break; 576 577 if ((pp = vp->ide_products) == NULL) 578 return NULL; 579 580 for (; pp->chip_map != NULL; pp++) 581 if (PCI_PRODUCT(id) == pp->ide_product) 582 break; 583 584 if (pp->chip_map == NULL) 585 return NULL; 586 return pp; 587 } 588 589 int 590 pciide_match(parent, match, aux) 591 struct device *parent; 592 struct cfdata *match; 593 void *aux; 594 { 595 struct pci_attach_args *pa = aux; 596 const struct pciide_product_desc *pp; 597 598 /* 599 * Check the ID register to see that it's a PCI IDE controller. 600 * If it is, we assume that we can deal with it; it _should_ 601 * work in a standardized way... 602 */ 603 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE && 604 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 605 return (1); 606 } 607 608 /* 609 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE 610 * controllers. Let see if we can deal with it anyway. 611 */ 612 pp = pciide_lookup_product(pa->pa_id); 613 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) { 614 return (1); 615 } 616 617 return (0); 618 } 619 620 void 621 pciide_attach(parent, self, aux) 622 struct device *parent, *self; 623 void *aux; 624 { 625 struct pci_attach_args *pa = aux; 626 pci_chipset_tag_t pc = pa->pa_pc; 627 pcitag_t tag = pa->pa_tag; 628 struct pciide_softc *sc = (struct pciide_softc *)self; 629 pcireg_t csr; 630 char devinfo[256]; 631 const char *displaydev; 632 633 sc->sc_pp = pciide_lookup_product(pa->pa_id); 634 if (sc->sc_pp == NULL) { 635 sc->sc_pp = &default_product_desc; 636 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo); 637 displaydev = devinfo; 638 } else 639 displaydev = sc->sc_pp->ide_name; 640 641 /* if displaydev == NULL, printf is done in chip-specific map */ 642 if (displaydev) 643 printf(": %s (rev. 0x%02x)\n", displaydev, 644 PCI_REVISION(pa->pa_class)); 645 646 sc->sc_pc = pa->pa_pc; 647 sc->sc_tag = pa->pa_tag; 648 #ifdef WDCDEBUG 649 if (wdcdebug_pciide_mask & DEBUG_PROBE) 650 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL); 651 #endif 652 sc->sc_pp->chip_map(sc, pa); 653 654 if (sc->sc_dma_ok) { 655 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 656 csr |= PCI_COMMAND_MASTER_ENABLE; 657 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 658 } 659 WDCDEBUG_PRINT(("pciide: command/status register=%x\n", 660 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE); 661 } 662 663 /* tell wether the chip is enabled or not */ 664 int 665 pciide_chipen(sc, pa) 666 struct pciide_softc *sc; 667 struct pci_attach_args *pa; 668 { 669 pcireg_t csr; 670 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) { 671 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 672 PCI_COMMAND_STATUS_REG); 673 printf("%s: device disabled (at %s)\n", 674 sc->sc_wdcdev.sc_dev.dv_xname, 675 (csr & PCI_COMMAND_IO_ENABLE) == 0 ? 676 "device" : "bridge"); 677 return 0; 678 } 679 return 1; 680 } 681 682 int 683 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep) 684 struct pci_attach_args *pa; 685 struct pciide_channel *cp; 686 int compatchan; 687 bus_size_t *cmdsizep, *ctlsizep; 688 { 689 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 690 struct channel_softc *wdc_cp = &cp->wdc_channel; 691 692 cp->compat = 1; 693 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 694 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 695 696 wdc_cp->cmd_iot = pa->pa_iot; 697 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 698 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) { 699 printf("%s: couldn't map %s channel cmd regs\n", 700 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 701 return (0); 702 } 703 704 wdc_cp->ctl_iot = pa->pa_iot; 705 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 706 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) { 707 printf("%s: couldn't map %s channel ctl regs\n", 708 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 709 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, 710 PCIIDE_COMPAT_CMD_SIZE); 711 return (0); 712 } 713 714 return (1); 715 } 716 717 int 718 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr) 719 struct pci_attach_args * pa; 720 struct pciide_channel *cp; 721 bus_size_t *cmdsizep, *ctlsizep; 722 int (*pci_intr) __P((void *)); 723 { 724 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 725 struct channel_softc *wdc_cp = &cp->wdc_channel; 726 const char *intrstr; 727 pci_intr_handle_t intrhandle; 728 729 cp->compat = 0; 730 731 if (sc->sc_pci_ih == NULL) { 732 if (pci_intr_map(pa, &intrhandle) != 0) { 733 printf("%s: couldn't map native-PCI interrupt\n", 734 sc->sc_wdcdev.sc_dev.dv_xname); 735 return 0; 736 } 737 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 738 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 739 intrhandle, IPL_BIO, pci_intr, sc); 740 if (sc->sc_pci_ih != NULL) { 741 printf("%s: using %s for native-PCI interrupt\n", 742 sc->sc_wdcdev.sc_dev.dv_xname, 743 intrstr ? intrstr : "unknown interrupt"); 744 } else { 745 printf("%s: couldn't establish native-PCI interrupt", 746 sc->sc_wdcdev.sc_dev.dv_xname); 747 if (intrstr != NULL) 748 printf(" at %s", intrstr); 749 printf("\n"); 750 return 0; 751 } 752 } 753 cp->ih = sc->sc_pci_ih; 754 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel), 755 PCI_MAPREG_TYPE_IO, 0, 756 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) { 757 printf("%s: couldn't map %s channel cmd regs\n", 758 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 759 return 0; 760 } 761 762 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel), 763 PCI_MAPREG_TYPE_IO, 0, 764 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) { 765 printf("%s: couldn't map %s channel ctl regs\n", 766 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 767 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 768 return 0; 769 } 770 /* 771 * In native mode, 4 bytes of I/O space are mapped for the control 772 * register, the control register is at offset 2. Pass the generic 773 * code a handle for only one byte at the rigth offset. 774 */ 775 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1, 776 &wdc_cp->ctl_ioh) != 0) { 777 printf("%s: unable to subregion %s channel ctl regs\n", 778 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 779 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 780 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep); 781 return 0; 782 } 783 return (1); 784 } 785 786 void 787 pciide_mapreg_dma(sc, pa) 788 struct pciide_softc *sc; 789 struct pci_attach_args *pa; 790 { 791 pcireg_t maptype; 792 bus_addr_t addr; 793 794 /* 795 * Map DMA registers 796 * 797 * Note that sc_dma_ok is the right variable to test to see if 798 * DMA can be done. If the interface doesn't support DMA, 799 * sc_dma_ok will never be non-zero. If the DMA regs couldn't 800 * be mapped, it'll be zero. I.e., sc_dma_ok will only be 801 * non-zero if the interface supports DMA and the registers 802 * could be mapped. 803 * 804 * XXX Note that despite the fact that the Bus Master IDE specs 805 * XXX say that "The bus master IDE function uses 16 bytes of IO 806 * XXX space," some controllers (at least the United 807 * XXX Microelectronics UM8886BF) place it in memory space. 808 */ 809 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 810 PCIIDE_REG_BUS_MASTER_DMA); 811 812 switch (maptype) { 813 case PCI_MAPREG_TYPE_IO: 814 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 815 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 816 &addr, NULL, NULL) == 0); 817 if (sc->sc_dma_ok == 0) { 818 printf(", but unused (couldn't query registers)"); 819 break; 820 } 821 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 822 && addr >= 0x10000) { 823 sc->sc_dma_ok = 0; 824 printf(", but unused (registers at unsafe address %#lx)", (unsigned long)addr); 825 break; 826 } 827 /* FALLTHROUGH */ 828 829 case PCI_MAPREG_MEM_TYPE_32BIT: 830 sc->sc_dma_ok = (pci_mapreg_map(pa, 831 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 832 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0); 833 sc->sc_dmat = pa->pa_dmat; 834 if (sc->sc_dma_ok == 0) { 835 printf(", but unused (couldn't map registers)"); 836 } else { 837 sc->sc_wdcdev.dma_arg = sc; 838 sc->sc_wdcdev.dma_init = pciide_dma_init; 839 sc->sc_wdcdev.dma_start = pciide_dma_start; 840 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 841 } 842 break; 843 844 default: 845 sc->sc_dma_ok = 0; 846 printf(", but unsupported register maptype (0x%x)", maptype); 847 } 848 } 849 850 int 851 pciide_compat_intr(arg) 852 void *arg; 853 { 854 struct pciide_channel *cp = arg; 855 856 #ifdef DIAGNOSTIC 857 /* should only be called for a compat channel */ 858 if (cp->compat == 0) 859 panic("pciide compat intr called for non-compat chan %p\n", cp); 860 #endif 861 return (wdcintr(&cp->wdc_channel)); 862 } 863 864 int 865 pciide_pci_intr(arg) 866 void *arg; 867 { 868 struct pciide_softc *sc = arg; 869 struct pciide_channel *cp; 870 struct channel_softc *wdc_cp; 871 int i, rv, crv; 872 873 rv = 0; 874 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 875 cp = &sc->pciide_channels[i]; 876 wdc_cp = &cp->wdc_channel; 877 878 /* If a compat channel skip. */ 879 if (cp->compat) 880 continue; 881 /* if this channel not waiting for intr, skip */ 882 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) 883 continue; 884 885 crv = wdcintr(wdc_cp); 886 if (crv == 0) 887 ; /* leave rv alone */ 888 else if (crv == 1) 889 rv = 1; /* claim the intr */ 890 else if (rv == 0) /* crv should be -1 in this case */ 891 rv = crv; /* if we've done no better, take it */ 892 } 893 return (rv); 894 } 895 896 void 897 pciide_channel_dma_setup(cp) 898 struct pciide_channel *cp; 899 { 900 int drive; 901 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 902 struct ata_drive_datas *drvp; 903 904 for (drive = 0; drive < 2; drive++) { 905 drvp = &cp->wdc_channel.ch_drive[drive]; 906 /* If no drive, skip */ 907 if ((drvp->drive_flags & DRIVE) == 0) 908 continue; 909 /* setup DMA if needed */ 910 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 911 (drvp->drive_flags & DRIVE_UDMA) == 0) || 912 sc->sc_dma_ok == 0) { 913 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 914 continue; 915 } 916 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive) 917 != 0) { 918 /* Abort DMA setup */ 919 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 920 continue; 921 } 922 } 923 } 924 925 int 926 pciide_dma_table_setup(sc, channel, drive) 927 struct pciide_softc *sc; 928 int channel, drive; 929 { 930 bus_dma_segment_t seg; 931 int error, rseg; 932 const bus_size_t dma_table_size = 933 sizeof(struct idedma_table) * NIDEDMA_TABLES; 934 struct pciide_dma_maps *dma_maps = 935 &sc->pciide_channels[channel].dma_maps[drive]; 936 937 /* If table was already allocated, just return */ 938 if (dma_maps->dma_table) 939 return 0; 940 941 /* Allocate memory for the DMA tables and map it */ 942 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size, 943 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg, 944 BUS_DMA_NOWAIT)) != 0) { 945 printf("%s:%d: unable to allocate table DMA for " 946 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 947 channel, drive, error); 948 return error; 949 } 950 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 951 dma_table_size, 952 (caddr_t *)&dma_maps->dma_table, 953 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 954 printf("%s:%d: unable to map table DMA for" 955 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 956 channel, drive, error); 957 return error; 958 } 959 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, " 960 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size, 961 (unsigned long)seg.ds_addr), DEBUG_PROBE); 962 963 /* Create and load table DMA map for this disk */ 964 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size, 965 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 966 &dma_maps->dmamap_table)) != 0) { 967 printf("%s:%d: unable to create table DMA map for " 968 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 969 channel, drive, error); 970 return error; 971 } 972 if ((error = bus_dmamap_load(sc->sc_dmat, 973 dma_maps->dmamap_table, 974 dma_maps->dma_table, 975 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 976 printf("%s:%d: unable to load table DMA map for " 977 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 978 channel, drive, error); 979 return error; 980 } 981 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 982 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr), 983 DEBUG_PROBE); 984 /* Create a xfer DMA map for this drive */ 985 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 986 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN, 987 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 988 &dma_maps->dmamap_xfer)) != 0) { 989 printf("%s:%d: unable to create xfer DMA map for " 990 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 991 channel, drive, error); 992 return error; 993 } 994 return 0; 995 } 996 997 int 998 pciide_dma_init(v, channel, drive, databuf, datalen, flags) 999 void *v; 1000 int channel, drive; 1001 void *databuf; 1002 size_t datalen; 1003 int flags; 1004 { 1005 struct pciide_softc *sc = v; 1006 int error, seg; 1007 struct pciide_dma_maps *dma_maps = 1008 &sc->pciide_channels[channel].dma_maps[drive]; 1009 1010 error = bus_dmamap_load(sc->sc_dmat, 1011 dma_maps->dmamap_xfer, 1012 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 1013 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE)); 1014 if (error) { 1015 printf("%s:%d: unable to load xfer DMA map for" 1016 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1017 channel, drive, error); 1018 return error; 1019 } 1020 1021 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1022 dma_maps->dmamap_xfer->dm_mapsize, 1023 (flags & WDC_DMA_READ) ? 1024 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1025 1026 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 1027 #ifdef DIAGNOSTIC 1028 /* A segment must not cross a 64k boundary */ 1029 { 1030 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 1031 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 1032 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 1033 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 1034 printf("pciide_dma: segment %d physical addr 0x%lx" 1035 " len 0x%lx not properly aligned\n", 1036 seg, phys, len); 1037 panic("pciide_dma: buf align"); 1038 } 1039 } 1040 #endif 1041 dma_maps->dma_table[seg].base_addr = 1042 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 1043 dma_maps->dma_table[seg].byte_count = 1044 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 1045 IDEDMA_BYTE_COUNT_MASK); 1046 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 1047 seg, le32toh(dma_maps->dma_table[seg].byte_count), 1048 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 1049 1050 } 1051 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 1052 htole32(IDEDMA_BYTE_COUNT_EOT); 1053 1054 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 1055 dma_maps->dmamap_table->dm_mapsize, 1056 BUS_DMASYNC_PREWRITE); 1057 1058 /* Maps are ready. Start DMA function */ 1059 #ifdef DIAGNOSTIC 1060 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 1061 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 1062 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr); 1063 panic("pciide_dma_init: table align"); 1064 } 1065 #endif 1066 1067 /* Clear status bits */ 1068 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1069 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, 1070 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1071 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel)); 1072 /* Write table addr */ 1073 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 1074 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel, 1075 dma_maps->dmamap_table->dm_segs[0].ds_addr); 1076 /* set read/write */ 1077 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1078 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1079 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0); 1080 /* remember flags */ 1081 dma_maps->dma_flags = flags; 1082 return 0; 1083 } 1084 1085 void 1086 pciide_dma_start(v, channel, drive) 1087 void *v; 1088 int channel, drive; 1089 { 1090 struct pciide_softc *sc = v; 1091 1092 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS); 1093 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1094 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1095 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1096 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START); 1097 } 1098 1099 int 1100 pciide_dma_finish(v, channel, drive, force) 1101 void *v; 1102 int channel, drive; 1103 int force; 1104 { 1105 struct pciide_softc *sc = v; 1106 u_int8_t status; 1107 int error = 0; 1108 struct pciide_dma_maps *dma_maps = 1109 &sc->pciide_channels[channel].dma_maps[drive]; 1110 1111 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1112 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel); 1113 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 1114 DEBUG_XFERS); 1115 1116 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) 1117 return WDC_DMAST_NOIRQ; 1118 1119 /* stop DMA channel */ 1120 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1121 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1122 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1123 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START); 1124 1125 /* Unload the map of the data buffer */ 1126 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1127 dma_maps->dmamap_xfer->dm_mapsize, 1128 (dma_maps->dma_flags & WDC_DMA_READ) ? 1129 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1130 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 1131 1132 if ((status & IDEDMA_CTL_ERR) != 0) { 1133 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 1134 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 1135 error |= WDC_DMAST_ERR; 1136 } 1137 1138 if ((status & IDEDMA_CTL_INTR) == 0) { 1139 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 1140 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 1141 drive, status); 1142 error |= WDC_DMAST_NOIRQ; 1143 } 1144 1145 if ((status & IDEDMA_CTL_ACT) != 0) { 1146 /* data underrun, may be a valid condition for ATAPI */ 1147 error |= WDC_DMAST_UNDER; 1148 } 1149 return error; 1150 } 1151 1152 void 1153 pciide_irqack(chp) 1154 struct channel_softc *chp; 1155 { 1156 struct pciide_channel *cp = (struct pciide_channel*)chp; 1157 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1158 1159 /* clear status bits in IDE DMA registers */ 1160 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1161 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel, 1162 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1163 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel)); 1164 } 1165 1166 /* some common code used by several chip_map */ 1167 int 1168 pciide_chansetup(sc, channel, interface) 1169 struct pciide_softc *sc; 1170 int channel; 1171 pcireg_t interface; 1172 { 1173 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1174 sc->wdc_chanarray[channel] = &cp->wdc_channel; 1175 cp->name = PCIIDE_CHANNEL_NAME(channel); 1176 cp->wdc_channel.channel = channel; 1177 cp->wdc_channel.wdc = &sc->sc_wdcdev; 1178 cp->wdc_channel.ch_queue = 1179 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 1180 if (cp->wdc_channel.ch_queue == NULL) { 1181 printf("%s %s channel: " 1182 "can't allocate memory for command queue", 1183 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1184 return 0; 1185 } 1186 printf("%s: %s channel %s to %s mode\n", 1187 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1188 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 1189 "configured" : "wired", 1190 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 1191 "native-PCI" : "compatibility"); 1192 return 1; 1193 } 1194 1195 /* some common code used by several chip channel_map */ 1196 void 1197 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr) 1198 struct pci_attach_args *pa; 1199 struct pciide_channel *cp; 1200 pcireg_t interface; 1201 bus_size_t *cmdsizep, *ctlsizep; 1202 int (*pci_intr) __P((void *)); 1203 { 1204 struct channel_softc *wdc_cp = &cp->wdc_channel; 1205 1206 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 1207 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 1208 pci_intr); 1209 else 1210 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1211 wdc_cp->channel, cmdsizep, ctlsizep); 1212 1213 if (cp->hw_ok == 0) 1214 return; 1215 wdc_cp->data32iot = wdc_cp->cmd_iot; 1216 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 1217 wdcattach(wdc_cp); 1218 } 1219 1220 /* 1221 * Generic code to call to know if a channel can be disabled. Return 1 1222 * if channel can be disabled, 0 if not 1223 */ 1224 int 1225 pciide_chan_candisable(cp) 1226 struct pciide_channel *cp; 1227 { 1228 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1229 struct channel_softc *wdc_cp = &cp->wdc_channel; 1230 1231 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 1232 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 1233 printf("%s: disabling %s channel (no drives)\n", 1234 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1235 cp->hw_ok = 0; 1236 return 1; 1237 } 1238 return 0; 1239 } 1240 1241 /* 1242 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 1243 * Set hw_ok=0 on failure 1244 */ 1245 void 1246 pciide_map_compat_intr(pa, cp, compatchan, interface) 1247 struct pci_attach_args *pa; 1248 struct pciide_channel *cp; 1249 int compatchan, interface; 1250 { 1251 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1252 struct channel_softc *wdc_cp = &cp->wdc_channel; 1253 1254 if (cp->hw_ok == 0) 1255 return; 1256 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1257 return; 1258 1259 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 1260 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 1261 pa, compatchan, pciide_compat_intr, cp); 1262 if (cp->ih == NULL) { 1263 #endif 1264 printf("%s: no compatibility interrupt for use by %s " 1265 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1266 cp->hw_ok = 0; 1267 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 1268 } 1269 #endif 1270 } 1271 1272 void 1273 pciide_print_modes(cp) 1274 struct pciide_channel *cp; 1275 { 1276 wdc_print_modes(&cp->wdc_channel); 1277 } 1278 1279 void 1280 default_chip_map(sc, pa) 1281 struct pciide_softc *sc; 1282 struct pci_attach_args *pa; 1283 { 1284 struct pciide_channel *cp; 1285 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1286 pcireg_t csr; 1287 int channel, drive; 1288 struct ata_drive_datas *drvp; 1289 u_int8_t idedma_ctl; 1290 bus_size_t cmdsize, ctlsize; 1291 char *failreason; 1292 1293 if (pciide_chipen(sc, pa) == 0) 1294 return; 1295 1296 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 1297 printf("%s: bus-master DMA support present", 1298 sc->sc_wdcdev.sc_dev.dv_xname); 1299 if (sc->sc_pp == &default_product_desc && 1300 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 1301 PCIIDE_OPTIONS_DMA) == 0) { 1302 printf(", but unused (no driver support)"); 1303 sc->sc_dma_ok = 0; 1304 } else { 1305 pciide_mapreg_dma(sc, pa); 1306 if (sc->sc_dma_ok != 0) 1307 printf(", used without full driver " 1308 "support"); 1309 } 1310 } else { 1311 printf("%s: hardware does not support DMA", 1312 sc->sc_wdcdev.sc_dev.dv_xname); 1313 sc->sc_dma_ok = 0; 1314 } 1315 printf("\n"); 1316 if (sc->sc_dma_ok) { 1317 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1318 sc->sc_wdcdev.irqack = pciide_irqack; 1319 } 1320 sc->sc_wdcdev.PIO_cap = 0; 1321 sc->sc_wdcdev.DMA_cap = 0; 1322 1323 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1324 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1325 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 1326 1327 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1328 cp = &sc->pciide_channels[channel]; 1329 if (pciide_chansetup(sc, channel, interface) == 0) 1330 continue; 1331 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 1332 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 1333 &ctlsize, pciide_pci_intr); 1334 } else { 1335 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1336 channel, &cmdsize, &ctlsize); 1337 } 1338 if (cp->hw_ok == 0) 1339 continue; 1340 /* 1341 * Check to see if something appears to be there. 1342 */ 1343 failreason = NULL; 1344 if (!wdcprobe(&cp->wdc_channel)) { 1345 failreason = "not responding; disabled or no drives?"; 1346 goto next; 1347 } 1348 /* 1349 * Now, make sure it's actually attributable to this PCI IDE 1350 * channel by trying to access the channel again while the 1351 * PCI IDE controller's I/O space is disabled. (If the 1352 * channel no longer appears to be there, it belongs to 1353 * this controller.) YUCK! 1354 */ 1355 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 1356 PCI_COMMAND_STATUS_REG); 1357 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1358 csr & ~PCI_COMMAND_IO_ENABLE); 1359 if (wdcprobe(&cp->wdc_channel)) 1360 failreason = "other hardware responding at addresses"; 1361 pci_conf_write(sc->sc_pc, sc->sc_tag, 1362 PCI_COMMAND_STATUS_REG, csr); 1363 next: 1364 if (failreason) { 1365 printf("%s: %s channel ignored (%s)\n", 1366 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1367 failreason); 1368 cp->hw_ok = 0; 1369 bus_space_unmap(cp->wdc_channel.cmd_iot, 1370 cp->wdc_channel.cmd_ioh, cmdsize); 1371 bus_space_unmap(cp->wdc_channel.ctl_iot, 1372 cp->wdc_channel.ctl_ioh, ctlsize); 1373 } else { 1374 pciide_map_compat_intr(pa, cp, channel, interface); 1375 } 1376 if (cp->hw_ok) { 1377 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 1378 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 1379 wdcattach(&cp->wdc_channel); 1380 } 1381 } 1382 1383 if (sc->sc_dma_ok == 0) 1384 return; 1385 1386 /* Allocate DMA maps */ 1387 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1388 idedma_ctl = 0; 1389 cp = &sc->pciide_channels[channel]; 1390 for (drive = 0; drive < 2; drive++) { 1391 drvp = &cp->wdc_channel.ch_drive[drive]; 1392 /* If no drive, skip */ 1393 if ((drvp->drive_flags & DRIVE) == 0) 1394 continue; 1395 if ((drvp->drive_flags & DRIVE_DMA) == 0) 1396 continue; 1397 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 1398 /* Abort DMA setup */ 1399 printf("%s:%d:%d: can't allocate DMA maps, " 1400 "using PIO transfers\n", 1401 sc->sc_wdcdev.sc_dev.dv_xname, 1402 channel, drive); 1403 drvp->drive_flags &= ~DRIVE_DMA; 1404 } 1405 printf("%s:%d:%d: using DMA data transfers\n", 1406 sc->sc_wdcdev.sc_dev.dv_xname, 1407 channel, drive); 1408 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1409 } 1410 if (idedma_ctl != 0) { 1411 /* Add software bits in status register */ 1412 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1413 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel), 1414 idedma_ctl); 1415 } 1416 } 1417 } 1418 1419 void 1420 piix_chip_map(sc, pa) 1421 struct pciide_softc *sc; 1422 struct pci_attach_args *pa; 1423 { 1424 struct pciide_channel *cp; 1425 int channel; 1426 u_int32_t idetim; 1427 bus_size_t cmdsize, ctlsize; 1428 1429 if (pciide_chipen(sc, pa) == 0) 1430 return; 1431 1432 printf("%s: bus-master DMA support present", 1433 sc->sc_wdcdev.sc_dev.dv_xname); 1434 pciide_mapreg_dma(sc, pa); 1435 printf("\n"); 1436 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1437 WDC_CAPABILITY_MODE; 1438 if (sc->sc_dma_ok) { 1439 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1440 sc->sc_wdcdev.irqack = pciide_irqack; 1441 switch(sc->sc_pp->ide_product) { 1442 case PCI_PRODUCT_INTEL_82371AB_IDE: 1443 case PCI_PRODUCT_INTEL_82440MX_IDE: 1444 case PCI_PRODUCT_INTEL_82801AA_IDE: 1445 case PCI_PRODUCT_INTEL_82801AB_IDE: 1446 case PCI_PRODUCT_INTEL_82801BA_IDE: 1447 case PCI_PRODUCT_INTEL_82801BAM_IDE: 1448 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 1449 } 1450 } 1451 sc->sc_wdcdev.PIO_cap = 4; 1452 sc->sc_wdcdev.DMA_cap = 2; 1453 switch(sc->sc_pp->ide_product) { 1454 case PCI_PRODUCT_INTEL_82801AA_IDE: 1455 sc->sc_wdcdev.UDMA_cap = 4; 1456 break; 1457 case PCI_PRODUCT_INTEL_82801BA_IDE: 1458 case PCI_PRODUCT_INTEL_82801BAM_IDE: 1459 sc->sc_wdcdev.UDMA_cap = 5; 1460 break; 1461 default: 1462 sc->sc_wdcdev.UDMA_cap = 2; 1463 } 1464 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE) 1465 sc->sc_wdcdev.set_modes = piix_setup_channel; 1466 else 1467 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 1468 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1469 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1470 1471 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x", 1472 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1473 DEBUG_PROBE); 1474 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1475 WDCDEBUG_PRINT((", sidetim=0x%x", 1476 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1477 DEBUG_PROBE); 1478 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1479 WDCDEBUG_PRINT((", udamreg 0x%x", 1480 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1481 DEBUG_PROBE); 1482 } 1483 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1484 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1485 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1486 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) { 1487 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1488 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1489 DEBUG_PROBE); 1490 } 1491 1492 } 1493 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1494 1495 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1496 cp = &sc->pciide_channels[channel]; 1497 /* PIIX is compat-only */ 1498 if (pciide_chansetup(sc, channel, 0) == 0) 1499 continue; 1500 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1501 if ((PIIX_IDETIM_READ(idetim, channel) & 1502 PIIX_IDETIM_IDE) == 0) { 1503 printf("%s: %s channel ignored (disabled)\n", 1504 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1505 continue; 1506 } 1507 /* PIIX are compat-only pciide devices */ 1508 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr); 1509 if (cp->hw_ok == 0) 1510 continue; 1511 if (pciide_chan_candisable(cp)) { 1512 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 1513 channel); 1514 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 1515 idetim); 1516 } 1517 pciide_map_compat_intr(pa, cp, channel, 0); 1518 if (cp->hw_ok == 0) 1519 continue; 1520 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 1521 } 1522 1523 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 1524 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1525 DEBUG_PROBE); 1526 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1527 WDCDEBUG_PRINT((", sidetim=0x%x", 1528 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1529 DEBUG_PROBE); 1530 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1531 WDCDEBUG_PRINT((", udamreg 0x%x", 1532 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1533 DEBUG_PROBE); 1534 } 1535 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1536 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1537 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1538 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) { 1539 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1540 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1541 DEBUG_PROBE); 1542 } 1543 } 1544 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1545 } 1546 1547 void 1548 piix_setup_channel(chp) 1549 struct channel_softc *chp; 1550 { 1551 u_int8_t mode[2], drive; 1552 u_int32_t oidetim, idetim, idedma_ctl; 1553 struct pciide_channel *cp = (struct pciide_channel*)chp; 1554 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1555 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 1556 1557 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1558 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 1559 idedma_ctl = 0; 1560 1561 /* set up new idetim: Enable IDE registers decode */ 1562 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 1563 chp->channel); 1564 1565 /* setup DMA */ 1566 pciide_channel_dma_setup(cp); 1567 1568 /* 1569 * Here we have to mess up with drives mode: PIIX can't have 1570 * different timings for master and slave drives. 1571 * We need to find the best combination. 1572 */ 1573 1574 /* If both drives supports DMA, take the lower mode */ 1575 if ((drvp[0].drive_flags & DRIVE_DMA) && 1576 (drvp[1].drive_flags & DRIVE_DMA)) { 1577 mode[0] = mode[1] = 1578 min(drvp[0].DMA_mode, drvp[1].DMA_mode); 1579 drvp[0].DMA_mode = mode[0]; 1580 drvp[1].DMA_mode = mode[1]; 1581 goto ok; 1582 } 1583 /* 1584 * If only one drive supports DMA, use its mode, and 1585 * put the other one in PIO mode 0 if mode not compatible 1586 */ 1587 if (drvp[0].drive_flags & DRIVE_DMA) { 1588 mode[0] = drvp[0].DMA_mode; 1589 mode[1] = drvp[1].PIO_mode; 1590 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] || 1591 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]]) 1592 mode[1] = drvp[1].PIO_mode = 0; 1593 goto ok; 1594 } 1595 if (drvp[1].drive_flags & DRIVE_DMA) { 1596 mode[1] = drvp[1].DMA_mode; 1597 mode[0] = drvp[0].PIO_mode; 1598 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] || 1599 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]]) 1600 mode[0] = drvp[0].PIO_mode = 0; 1601 goto ok; 1602 } 1603 /* 1604 * If both drives are not DMA, takes the lower mode, unless 1605 * one of them is PIO mode < 2 1606 */ 1607 if (drvp[0].PIO_mode < 2) { 1608 mode[0] = drvp[0].PIO_mode = 0; 1609 mode[1] = drvp[1].PIO_mode; 1610 } else if (drvp[1].PIO_mode < 2) { 1611 mode[1] = drvp[1].PIO_mode = 0; 1612 mode[0] = drvp[0].PIO_mode; 1613 } else { 1614 mode[0] = mode[1] = 1615 min(drvp[1].PIO_mode, drvp[0].PIO_mode); 1616 drvp[0].PIO_mode = mode[0]; 1617 drvp[1].PIO_mode = mode[1]; 1618 } 1619 ok: /* The modes are setup */ 1620 for (drive = 0; drive < 2; drive++) { 1621 if (drvp[drive].drive_flags & DRIVE_DMA) { 1622 idetim |= piix_setup_idetim_timings( 1623 mode[drive], 1, chp->channel); 1624 goto end; 1625 } 1626 } 1627 /* If we are there, none of the drives are DMA */ 1628 if (mode[0] >= 2) 1629 idetim |= piix_setup_idetim_timings( 1630 mode[0], 0, chp->channel); 1631 else 1632 idetim |= piix_setup_idetim_timings( 1633 mode[1], 0, chp->channel); 1634 end: /* 1635 * timing mode is now set up in the controller. Enable 1636 * it per-drive 1637 */ 1638 for (drive = 0; drive < 2; drive++) { 1639 /* If no drive, skip */ 1640 if ((drvp[drive].drive_flags & DRIVE) == 0) 1641 continue; 1642 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 1643 if (drvp[drive].drive_flags & DRIVE_DMA) 1644 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1645 } 1646 if (idedma_ctl != 0) { 1647 /* Add software bits in status register */ 1648 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1649 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 1650 idedma_ctl); 1651 } 1652 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 1653 pciide_print_modes(cp); 1654 } 1655 1656 void 1657 piix3_4_setup_channel(chp) 1658 struct channel_softc *chp; 1659 { 1660 struct ata_drive_datas *drvp; 1661 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 1662 struct pciide_channel *cp = (struct pciide_channel*)chp; 1663 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1664 int drive; 1665 int channel = chp->channel; 1666 1667 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1668 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 1669 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 1670 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 1671 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 1672 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 1673 PIIX_SIDETIM_RTC_MASK(channel)); 1674 1675 idedma_ctl = 0; 1676 /* If channel disabled, no need to go further */ 1677 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 1678 return; 1679 /* set up new idetim: Enable IDE registers decode */ 1680 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 1681 1682 /* setup DMA if needed */ 1683 pciide_channel_dma_setup(cp); 1684 1685 for (drive = 0; drive < 2; drive++) { 1686 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 1687 PIIX_UDMATIM_SET(0x3, channel, drive)); 1688 drvp = &chp->ch_drive[drive]; 1689 /* If no drive, skip */ 1690 if ((drvp->drive_flags & DRIVE) == 0) 1691 continue; 1692 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1693 (drvp->drive_flags & DRIVE_UDMA) == 0)) 1694 goto pio; 1695 1696 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1697 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1698 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1699 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) { 1700 ideconf |= PIIX_CONFIG_PINGPONG; 1701 } 1702 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1703 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) { 1704 /* setup Ultra/100 */ 1705 if (drvp->UDMA_mode > 2 && 1706 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 1707 drvp->UDMA_mode = 2; 1708 if (drvp->UDMA_mode > 4) { 1709 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 1710 } else { 1711 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 1712 if (drvp->UDMA_mode > 2) { 1713 ideconf |= PIIX_CONFIG_UDMA66(channel, 1714 drive); 1715 } else { 1716 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 1717 drive); 1718 } 1719 } 1720 } 1721 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) { 1722 /* setup Ultra/66 */ 1723 if (drvp->UDMA_mode > 2 && 1724 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 1725 drvp->UDMA_mode = 2; 1726 if (drvp->UDMA_mode > 2) 1727 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 1728 else 1729 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 1730 } 1731 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1732 (drvp->drive_flags & DRIVE_UDMA)) { 1733 /* use Ultra/DMA */ 1734 drvp->drive_flags &= ~DRIVE_DMA; 1735 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 1736 udmareg |= PIIX_UDMATIM_SET( 1737 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 1738 } else { 1739 /* use Multiword DMA */ 1740 drvp->drive_flags &= ~DRIVE_UDMA; 1741 if (drive == 0) { 1742 idetim |= piix_setup_idetim_timings( 1743 drvp->DMA_mode, 1, channel); 1744 } else { 1745 sidetim |= piix_setup_sidetim_timings( 1746 drvp->DMA_mode, 1, channel); 1747 idetim =PIIX_IDETIM_SET(idetim, 1748 PIIX_IDETIM_SITRE, channel); 1749 } 1750 } 1751 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1752 1753 pio: /* use PIO mode */ 1754 idetim |= piix_setup_idetim_drvs(drvp); 1755 if (drive == 0) { 1756 idetim |= piix_setup_idetim_timings( 1757 drvp->PIO_mode, 0, channel); 1758 } else { 1759 sidetim |= piix_setup_sidetim_timings( 1760 drvp->PIO_mode, 0, channel); 1761 idetim =PIIX_IDETIM_SET(idetim, 1762 PIIX_IDETIM_SITRE, channel); 1763 } 1764 } 1765 if (idedma_ctl != 0) { 1766 /* Add software bits in status register */ 1767 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1768 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel), 1769 idedma_ctl); 1770 } 1771 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 1772 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 1773 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 1774 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 1775 pciide_print_modes(cp); 1776 } 1777 1778 1779 /* setup ISP and RTC fields, based on mode */ 1780 static u_int32_t 1781 piix_setup_idetim_timings(mode, dma, channel) 1782 u_int8_t mode; 1783 u_int8_t dma; 1784 u_int8_t channel; 1785 { 1786 1787 if (dma) 1788 return PIIX_IDETIM_SET(0, 1789 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 1790 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 1791 channel); 1792 else 1793 return PIIX_IDETIM_SET(0, 1794 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 1795 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 1796 channel); 1797 } 1798 1799 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 1800 static u_int32_t 1801 piix_setup_idetim_drvs(drvp) 1802 struct ata_drive_datas *drvp; 1803 { 1804 u_int32_t ret = 0; 1805 struct channel_softc *chp = drvp->chnl_softc; 1806 u_int8_t channel = chp->channel; 1807 u_int8_t drive = drvp->drive; 1808 1809 /* 1810 * If drive is using UDMA, timings setups are independant 1811 * So just check DMA and PIO here. 1812 */ 1813 if (drvp->drive_flags & DRIVE_DMA) { 1814 /* if mode = DMA mode 0, use compatible timings */ 1815 if ((drvp->drive_flags & DRIVE_DMA) && 1816 drvp->DMA_mode == 0) { 1817 drvp->PIO_mode = 0; 1818 return ret; 1819 } 1820 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 1821 /* 1822 * PIO and DMA timings are the same, use fast timings for PIO 1823 * too, else use compat timings. 1824 */ 1825 if ((piix_isp_pio[drvp->PIO_mode] != 1826 piix_isp_dma[drvp->DMA_mode]) || 1827 (piix_rtc_pio[drvp->PIO_mode] != 1828 piix_rtc_dma[drvp->DMA_mode])) 1829 drvp->PIO_mode = 0; 1830 /* if PIO mode <= 2, use compat timings for PIO */ 1831 if (drvp->PIO_mode <= 2) { 1832 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 1833 channel); 1834 return ret; 1835 } 1836 } 1837 1838 /* 1839 * Now setup PIO modes. If mode < 2, use compat timings. 1840 * Else enable fast timings. Enable IORDY and prefetch/post 1841 * if PIO mode >= 3. 1842 */ 1843 1844 if (drvp->PIO_mode < 2) 1845 return ret; 1846 1847 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 1848 if (drvp->PIO_mode >= 3) { 1849 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 1850 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 1851 } 1852 return ret; 1853 } 1854 1855 /* setup values in SIDETIM registers, based on mode */ 1856 static u_int32_t 1857 piix_setup_sidetim_timings(mode, dma, channel) 1858 u_int8_t mode; 1859 u_int8_t dma; 1860 u_int8_t channel; 1861 { 1862 if (dma) 1863 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 1864 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel); 1865 else 1866 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 1867 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel); 1868 } 1869 1870 void 1871 amd7x6_chip_map(sc, pa) 1872 struct pciide_softc *sc; 1873 struct pci_attach_args *pa; 1874 { 1875 struct pciide_channel *cp; 1876 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1877 int channel; 1878 pcireg_t chanenable; 1879 bus_size_t cmdsize, ctlsize; 1880 1881 if (pciide_chipen(sc, pa) == 0) 1882 return; 1883 printf("%s: bus-master DMA support present", 1884 sc->sc_wdcdev.sc_dev.dv_xname); 1885 pciide_mapreg_dma(sc, pa); 1886 printf("\n"); 1887 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1888 WDC_CAPABILITY_MODE; 1889 if (sc->sc_dma_ok) { 1890 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 1891 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 1892 sc->sc_wdcdev.irqack = pciide_irqack; 1893 } 1894 sc->sc_wdcdev.PIO_cap = 4; 1895 sc->sc_wdcdev.DMA_cap = 2; 1896 1897 if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE) 1898 sc->sc_wdcdev.UDMA_cap = 5; 1899 else 1900 sc->sc_wdcdev.UDMA_cap = 4; 1901 sc->sc_wdcdev.set_modes = amd7x6_setup_channel; 1902 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1903 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1904 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN); 1905 1906 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable), 1907 DEBUG_PROBE); 1908 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1909 cp = &sc->pciide_channels[channel]; 1910 if (pciide_chansetup(sc, channel, interface) == 0) 1911 continue; 1912 1913 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) { 1914 printf("%s: %s channel ignored (disabled)\n", 1915 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1916 continue; 1917 } 1918 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 1919 pciide_pci_intr); 1920 1921 if (pciide_chan_candisable(cp)) 1922 chanenable &= ~AMD7X6_CHAN_EN(channel); 1923 pciide_map_compat_intr(pa, cp, channel, interface); 1924 if (cp->hw_ok == 0) 1925 continue; 1926 1927 amd7x6_setup_channel(&cp->wdc_channel); 1928 } 1929 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN, 1930 chanenable); 1931 return; 1932 } 1933 1934 void 1935 amd7x6_setup_channel(chp) 1936 struct channel_softc *chp; 1937 { 1938 u_int32_t udmatim_reg, datatim_reg; 1939 u_int8_t idedma_ctl; 1940 int mode, drive; 1941 struct ata_drive_datas *drvp; 1942 struct pciide_channel *cp = (struct pciide_channel*)chp; 1943 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1944 #ifndef PCIIDE_AMD756_ENABLEDMA 1945 int rev = PCI_REVISION( 1946 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 1947 #endif 1948 1949 idedma_ctl = 0; 1950 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM); 1951 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA); 1952 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel); 1953 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel); 1954 1955 /* setup DMA if needed */ 1956 pciide_channel_dma_setup(cp); 1957 1958 for (drive = 0; drive < 2; drive++) { 1959 drvp = &chp->ch_drive[drive]; 1960 /* If no drive, skip */ 1961 if ((drvp->drive_flags & DRIVE) == 0) 1962 continue; 1963 /* add timing values, setup DMA if needed */ 1964 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1965 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 1966 mode = drvp->PIO_mode; 1967 goto pio; 1968 } 1969 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1970 (drvp->drive_flags & DRIVE_UDMA)) { 1971 /* use Ultra/DMA */ 1972 drvp->drive_flags &= ~DRIVE_DMA; 1973 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) | 1974 AMD7X6_UDMA_EN_MTH(chp->channel, drive) | 1975 AMD7X6_UDMA_TIME(chp->channel, drive, 1976 amd7x6_udma_tim[drvp->UDMA_mode]); 1977 /* can use PIO timings, MW DMA unused */ 1978 mode = drvp->PIO_mode; 1979 } else { 1980 /* use Multiword DMA, but only if revision is OK */ 1981 drvp->drive_flags &= ~DRIVE_UDMA; 1982 #ifndef PCIIDE_AMD756_ENABLEDMA 1983 /* 1984 * The workaround doesn't seem to be necessary 1985 * with all drives, so it can be disabled by 1986 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 1987 * triggered. 1988 */ 1989 if (sc->sc_pp->ide_product == 1990 PCI_PRODUCT_AMD_PBC756_IDE && 1991 AMD756_CHIPREV_DISABLEDMA(rev)) { 1992 printf("%s:%d:%d: multi-word DMA disabled due " 1993 "to chip revision\n", 1994 sc->sc_wdcdev.sc_dev.dv_xname, 1995 chp->channel, drive); 1996 mode = drvp->PIO_mode; 1997 drvp->drive_flags &= ~DRIVE_DMA; 1998 goto pio; 1999 } 2000 #endif 2001 /* mode = min(pio, dma+2) */ 2002 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2003 mode = drvp->PIO_mode; 2004 else 2005 mode = drvp->DMA_mode + 2; 2006 } 2007 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2008 2009 pio: /* setup PIO mode */ 2010 if (mode <= 2) { 2011 drvp->DMA_mode = 0; 2012 drvp->PIO_mode = 0; 2013 mode = 0; 2014 } else { 2015 drvp->PIO_mode = mode; 2016 drvp->DMA_mode = mode - 2; 2017 } 2018 datatim_reg |= 2019 AMD7X6_DATATIM_PULSE(chp->channel, drive, 2020 amd7x6_pio_set[mode]) | 2021 AMD7X6_DATATIM_RECOV(chp->channel, drive, 2022 amd7x6_pio_rec[mode]); 2023 } 2024 if (idedma_ctl != 0) { 2025 /* Add software bits in status register */ 2026 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2027 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2028 idedma_ctl); 2029 } 2030 pciide_print_modes(cp); 2031 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg); 2032 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg); 2033 } 2034 2035 void 2036 apollo_chip_map(sc, pa) 2037 struct pciide_softc *sc; 2038 struct pci_attach_args *pa; 2039 { 2040 struct pciide_channel *cp; 2041 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2042 int channel; 2043 u_int32_t ideconf; 2044 bus_size_t cmdsize, ctlsize; 2045 pcitag_t pcib_tag; 2046 pcireg_t pcib_id, pcib_class; 2047 2048 if (pciide_chipen(sc, pa) == 0) 2049 return; 2050 /* get a PCI tag for the ISA bridge (function 0 of the same device) */ 2051 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 2052 /* and read ID and rev of the ISA bridge */ 2053 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG); 2054 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG); 2055 printf(": VIA Technologies "); 2056 switch (PCI_PRODUCT(pcib_id)) { 2057 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 2058 printf("VT82C586 (Apollo VP) "); 2059 if(PCI_REVISION(pcib_class) >= 0x02) { 2060 printf("ATA33 controller\n"); 2061 sc->sc_wdcdev.UDMA_cap = 2; 2062 } else { 2063 printf("controller\n"); 2064 sc->sc_wdcdev.UDMA_cap = 0; 2065 } 2066 break; 2067 case PCI_PRODUCT_VIATECH_VT82C596A: 2068 printf("VT82C596A (Apollo Pro) "); 2069 if (PCI_REVISION(pcib_class) >= 0x12) { 2070 printf("ATA66 controller\n"); 2071 sc->sc_wdcdev.UDMA_cap = 4; 2072 } else { 2073 printf("ATA33 controller\n"); 2074 sc->sc_wdcdev.UDMA_cap = 2; 2075 } 2076 break; 2077 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 2078 printf("VT82C686A (Apollo KX133) "); 2079 if (PCI_REVISION(pcib_class) >= 0x40) { 2080 printf("ATA100 controller\n"); 2081 sc->sc_wdcdev.UDMA_cap = 5; 2082 } else { 2083 printf("ATA66 controller\n"); 2084 sc->sc_wdcdev.UDMA_cap = 4; 2085 } 2086 break; 2087 default: 2088 printf("unknown ATA controller\n"); 2089 sc->sc_wdcdev.UDMA_cap = 0; 2090 } 2091 2092 printf("%s: bus-master DMA support present", 2093 sc->sc_wdcdev.sc_dev.dv_xname); 2094 pciide_mapreg_dma(sc, pa); 2095 printf("\n"); 2096 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2097 WDC_CAPABILITY_MODE; 2098 if (sc->sc_dma_ok) { 2099 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2100 sc->sc_wdcdev.irqack = pciide_irqack; 2101 if (sc->sc_wdcdev.UDMA_cap > 0) 2102 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2103 } 2104 sc->sc_wdcdev.PIO_cap = 4; 2105 sc->sc_wdcdev.DMA_cap = 2; 2106 sc->sc_wdcdev.set_modes = apollo_setup_channel; 2107 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2108 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2109 2110 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 2111 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2112 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 2113 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 2114 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2115 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 2116 DEBUG_PROBE); 2117 2118 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2119 cp = &sc->pciide_channels[channel]; 2120 if (pciide_chansetup(sc, channel, interface) == 0) 2121 continue; 2122 2123 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 2124 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 2125 printf("%s: %s channel ignored (disabled)\n", 2126 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2127 continue; 2128 } 2129 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2130 pciide_pci_intr); 2131 if (cp->hw_ok == 0) 2132 continue; 2133 if (pciide_chan_candisable(cp)) { 2134 ideconf &= ~APO_IDECONF_EN(channel); 2135 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 2136 ideconf); 2137 } 2138 pciide_map_compat_intr(pa, cp, channel, interface); 2139 2140 if (cp->hw_ok == 0) 2141 continue; 2142 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 2143 } 2144 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2145 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2146 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 2147 } 2148 2149 void 2150 apollo_setup_channel(chp) 2151 struct channel_softc *chp; 2152 { 2153 u_int32_t udmatim_reg, datatim_reg; 2154 u_int8_t idedma_ctl; 2155 int mode, drive; 2156 struct ata_drive_datas *drvp; 2157 struct pciide_channel *cp = (struct pciide_channel*)chp; 2158 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2159 2160 idedma_ctl = 0; 2161 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 2162 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 2163 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 2164 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 2165 2166 /* setup DMA if needed */ 2167 pciide_channel_dma_setup(cp); 2168 2169 for (drive = 0; drive < 2; drive++) { 2170 drvp = &chp->ch_drive[drive]; 2171 /* If no drive, skip */ 2172 if ((drvp->drive_flags & DRIVE) == 0) 2173 continue; 2174 /* add timing values, setup DMA if needed */ 2175 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2176 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2177 mode = drvp->PIO_mode; 2178 goto pio; 2179 } 2180 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2181 (drvp->drive_flags & DRIVE_UDMA)) { 2182 /* use Ultra/DMA */ 2183 drvp->drive_flags &= ~DRIVE_DMA; 2184 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 2185 APO_UDMA_EN_MTH(chp->channel, drive); 2186 if (sc->sc_wdcdev.UDMA_cap == 5) { 2187 /* 686b */ 2188 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 2189 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2190 drive, apollo_udma100_tim[drvp->UDMA_mode]); 2191 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 2192 /* 596b or 686a */ 2193 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 2194 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2195 drive, apollo_udma66_tim[drvp->UDMA_mode]); 2196 } else { 2197 /* 596a or 586b */ 2198 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2199 drive, apollo_udma33_tim[drvp->UDMA_mode]); 2200 } 2201 /* can use PIO timings, MW DMA unused */ 2202 mode = drvp->PIO_mode; 2203 } else { 2204 /* use Multiword DMA */ 2205 drvp->drive_flags &= ~DRIVE_UDMA; 2206 /* mode = min(pio, dma+2) */ 2207 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2208 mode = drvp->PIO_mode; 2209 else 2210 mode = drvp->DMA_mode + 2; 2211 } 2212 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2213 2214 pio: /* setup PIO mode */ 2215 if (mode <= 2) { 2216 drvp->DMA_mode = 0; 2217 drvp->PIO_mode = 0; 2218 mode = 0; 2219 } else { 2220 drvp->PIO_mode = mode; 2221 drvp->DMA_mode = mode - 2; 2222 } 2223 datatim_reg |= 2224 APO_DATATIM_PULSE(chp->channel, drive, 2225 apollo_pio_set[mode]) | 2226 APO_DATATIM_RECOV(chp->channel, drive, 2227 apollo_pio_rec[mode]); 2228 } 2229 if (idedma_ctl != 0) { 2230 /* Add software bits in status register */ 2231 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2232 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2233 idedma_ctl); 2234 } 2235 pciide_print_modes(cp); 2236 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 2237 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 2238 } 2239 2240 void 2241 cmd_channel_map(pa, sc, channel) 2242 struct pci_attach_args *pa; 2243 struct pciide_softc *sc; 2244 int channel; 2245 { 2246 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2247 bus_size_t cmdsize, ctlsize; 2248 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 2249 int interface; 2250 2251 /* 2252 * The 0648/0649 can be told to identify as a RAID controller. 2253 * In this case, we have to fake interface 2254 */ 2255 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2256 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2257 PCIIDE_INTERFACE_SETTABLE(1); 2258 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2259 CMD_CONF_DSA1) 2260 interface |= PCIIDE_INTERFACE_PCI(0) | 2261 PCIIDE_INTERFACE_PCI(1); 2262 } else { 2263 interface = PCI_INTERFACE(pa->pa_class); 2264 } 2265 2266 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2267 cp->name = PCIIDE_CHANNEL_NAME(channel); 2268 cp->wdc_channel.channel = channel; 2269 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2270 2271 if (channel > 0) { 2272 cp->wdc_channel.ch_queue = 2273 sc->pciide_channels[0].wdc_channel.ch_queue; 2274 } else { 2275 cp->wdc_channel.ch_queue = 2276 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2277 } 2278 if (cp->wdc_channel.ch_queue == NULL) { 2279 printf("%s %s channel: " 2280 "can't allocate memory for command queue", 2281 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2282 return; 2283 } 2284 2285 printf("%s: %s channel %s to %s mode\n", 2286 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2287 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 2288 "configured" : "wired", 2289 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 2290 "native-PCI" : "compatibility"); 2291 2292 /* 2293 * with a CMD PCI64x, if we get here, the first channel is enabled: 2294 * there's no way to disable the first channel without disabling 2295 * the whole device 2296 */ 2297 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 2298 printf("%s: %s channel ignored (disabled)\n", 2299 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2300 return; 2301 } 2302 2303 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 2304 if (cp->hw_ok == 0) 2305 return; 2306 if (channel == 1) { 2307 if (pciide_chan_candisable(cp)) { 2308 ctrl &= ~CMD_CTRL_2PORT; 2309 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2310 CMD_CTRL, ctrl); 2311 } 2312 } 2313 pciide_map_compat_intr(pa, cp, channel, interface); 2314 } 2315 2316 int 2317 cmd_pci_intr(arg) 2318 void *arg; 2319 { 2320 struct pciide_softc *sc = arg; 2321 struct pciide_channel *cp; 2322 struct channel_softc *wdc_cp; 2323 int i, rv, crv; 2324 u_int32_t priirq, secirq; 2325 2326 rv = 0; 2327 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2328 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2329 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2330 cp = &sc->pciide_channels[i]; 2331 wdc_cp = &cp->wdc_channel; 2332 /* If a compat channel skip. */ 2333 if (cp->compat) 2334 continue; 2335 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 2336 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 2337 crv = wdcintr(wdc_cp); 2338 if (crv == 0) 2339 printf("%s:%d: bogus intr\n", 2340 sc->sc_wdcdev.sc_dev.dv_xname, i); 2341 else 2342 rv = 1; 2343 } 2344 } 2345 return rv; 2346 } 2347 2348 void 2349 cmd_chip_map(sc, pa) 2350 struct pciide_softc *sc; 2351 struct pci_attach_args *pa; 2352 { 2353 int channel; 2354 2355 /* 2356 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2357 * and base adresses registers can be disabled at 2358 * hardware level. In this case, the device is wired 2359 * in compat mode and its first channel is always enabled, 2360 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2361 * In fact, it seems that the first channel of the CMD PCI0640 2362 * can't be disabled. 2363 */ 2364 2365 #ifdef PCIIDE_CMD064x_DISABLE 2366 if (pciide_chipen(sc, pa) == 0) 2367 return; 2368 #endif 2369 2370 printf("%s: hardware does not support DMA\n", 2371 sc->sc_wdcdev.sc_dev.dv_xname); 2372 sc->sc_dma_ok = 0; 2373 2374 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2375 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2376 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 2377 2378 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2379 cmd_channel_map(pa, sc, channel); 2380 } 2381 } 2382 2383 void 2384 cmd0643_9_chip_map(sc, pa) 2385 struct pciide_softc *sc; 2386 struct pci_attach_args *pa; 2387 { 2388 struct pciide_channel *cp; 2389 int channel; 2390 int rev = PCI_REVISION( 2391 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 2392 2393 /* 2394 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2395 * and base adresses registers can be disabled at 2396 * hardware level. In this case, the device is wired 2397 * in compat mode and its first channel is always enabled, 2398 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2399 * In fact, it seems that the first channel of the CMD PCI0640 2400 * can't be disabled. 2401 */ 2402 2403 #ifdef PCIIDE_CMD064x_DISABLE 2404 if (pciide_chipen(sc, pa) == 0) 2405 return; 2406 #endif 2407 printf("%s: bus-master DMA support present", 2408 sc->sc_wdcdev.sc_dev.dv_xname); 2409 pciide_mapreg_dma(sc, pa); 2410 printf("\n"); 2411 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2412 WDC_CAPABILITY_MODE; 2413 if (sc->sc_dma_ok) { 2414 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2415 switch (sc->sc_pp->ide_product) { 2416 case PCI_PRODUCT_CMDTECH_649: 2417 case PCI_PRODUCT_CMDTECH_648: 2418 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2419 sc->sc_wdcdev.UDMA_cap = 4; 2420 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2421 break; 2422 case PCI_PRODUCT_CMDTECH_646: 2423 if (rev >= CMD0646U2_REV) { 2424 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2425 sc->sc_wdcdev.UDMA_cap = 2; 2426 } else if (rev >= CMD0646U_REV) { 2427 /* 2428 * Linux's driver claims that the 646U is broken 2429 * with UDMA. Only enable it if we know what we're 2430 * doing 2431 */ 2432 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 2433 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2434 sc->sc_wdcdev.UDMA_cap = 2; 2435 #endif 2436 /* explicitely disable UDMA */ 2437 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2438 CMD_UDMATIM(0), 0); 2439 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2440 CMD_UDMATIM(1), 0); 2441 } 2442 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2443 break; 2444 default: 2445 sc->sc_wdcdev.irqack = pciide_irqack; 2446 } 2447 } 2448 2449 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2450 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2451 sc->sc_wdcdev.PIO_cap = 4; 2452 sc->sc_wdcdev.DMA_cap = 2; 2453 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 2454 2455 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 2456 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2457 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2458 DEBUG_PROBE); 2459 2460 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2461 cp = &sc->pciide_channels[channel]; 2462 cmd_channel_map(pa, sc, channel); 2463 if (cp->hw_ok == 0) 2464 continue; 2465 cmd0643_9_setup_channel(&cp->wdc_channel); 2466 } 2467 /* 2468 * note - this also makes sure we clear the irq disable and reset 2469 * bits 2470 */ 2471 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 2472 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 2473 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2474 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2475 DEBUG_PROBE); 2476 } 2477 2478 void 2479 cmd0643_9_setup_channel(chp) 2480 struct channel_softc *chp; 2481 { 2482 struct ata_drive_datas *drvp; 2483 u_int8_t tim; 2484 u_int32_t idedma_ctl, udma_reg; 2485 int drive; 2486 struct pciide_channel *cp = (struct pciide_channel*)chp; 2487 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2488 2489 idedma_ctl = 0; 2490 /* setup DMA if needed */ 2491 pciide_channel_dma_setup(cp); 2492 2493 for (drive = 0; drive < 2; drive++) { 2494 drvp = &chp->ch_drive[drive]; 2495 /* If no drive, skip */ 2496 if ((drvp->drive_flags & DRIVE) == 0) 2497 continue; 2498 /* add timing values, setup DMA if needed */ 2499 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 2500 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 2501 if (drvp->drive_flags & DRIVE_UDMA) { 2502 /* UltraDMA on a 646U2, 0648 or 0649 */ 2503 drvp->drive_flags &= ~DRIVE_DMA; 2504 udma_reg = pciide_pci_read(sc->sc_pc, 2505 sc->sc_tag, CMD_UDMATIM(chp->channel)); 2506 if (drvp->UDMA_mode > 2 && 2507 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 2508 CMD_BICSR) & 2509 CMD_BICSR_80(chp->channel)) == 0) 2510 drvp->UDMA_mode = 2; 2511 if (drvp->UDMA_mode > 2) 2512 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 2513 else if (sc->sc_wdcdev.UDMA_cap > 2) 2514 udma_reg |= CMD_UDMATIM_UDMA33(drive); 2515 udma_reg |= CMD_UDMATIM_UDMA(drive); 2516 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 2517 CMD_UDMATIM_TIM_OFF(drive)); 2518 udma_reg |= 2519 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 2520 CMD_UDMATIM_TIM_OFF(drive)); 2521 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2522 CMD_UDMATIM(chp->channel), udma_reg); 2523 } else { 2524 /* 2525 * use Multiword DMA. 2526 * Timings will be used for both PIO and DMA, 2527 * so adjust DMA mode if needed 2528 * if we have a 0646U2/8/9, turn off UDMA 2529 */ 2530 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2531 udma_reg = pciide_pci_read(sc->sc_pc, 2532 sc->sc_tag, 2533 CMD_UDMATIM(chp->channel)); 2534 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 2535 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2536 CMD_UDMATIM(chp->channel), 2537 udma_reg); 2538 } 2539 if (drvp->PIO_mode >= 3 && 2540 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 2541 drvp->DMA_mode = drvp->PIO_mode - 2; 2542 } 2543 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 2544 } 2545 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2546 } 2547 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2548 CMD_DATA_TIM(chp->channel, drive), tim); 2549 } 2550 if (idedma_ctl != 0) { 2551 /* Add software bits in status register */ 2552 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2553 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2554 idedma_ctl); 2555 } 2556 pciide_print_modes(cp); 2557 } 2558 2559 void 2560 cmd646_9_irqack(chp) 2561 struct channel_softc *chp; 2562 { 2563 u_int32_t priirq, secirq; 2564 struct pciide_channel *cp = (struct pciide_channel*)chp; 2565 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2566 2567 if (chp->channel == 0) { 2568 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2569 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 2570 } else { 2571 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2572 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 2573 } 2574 pciide_irqack(chp); 2575 } 2576 2577 void 2578 cy693_chip_map(sc, pa) 2579 struct pciide_softc *sc; 2580 struct pci_attach_args *pa; 2581 { 2582 struct pciide_channel *cp; 2583 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2584 bus_size_t cmdsize, ctlsize; 2585 2586 if (pciide_chipen(sc, pa) == 0) 2587 return; 2588 /* 2589 * this chip has 2 PCI IDE functions, one for primary and one for 2590 * secondary. So we need to call pciide_mapregs_compat() with 2591 * the real channel 2592 */ 2593 if (pa->pa_function == 1) { 2594 sc->sc_cy_compatchan = 0; 2595 } else if (pa->pa_function == 2) { 2596 sc->sc_cy_compatchan = 1; 2597 } else { 2598 printf("%s: unexpected PCI function %d\n", 2599 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2600 return; 2601 } 2602 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2603 printf("%s: bus-master DMA support present", 2604 sc->sc_wdcdev.sc_dev.dv_xname); 2605 pciide_mapreg_dma(sc, pa); 2606 } else { 2607 printf("%s: hardware does not support DMA", 2608 sc->sc_wdcdev.sc_dev.dv_xname); 2609 sc->sc_dma_ok = 0; 2610 } 2611 printf("\n"); 2612 2613 sc->sc_cy_handle = cy82c693_init(pa->pa_iot); 2614 if (sc->sc_cy_handle == NULL) { 2615 printf("%s: unable to map hyperCache control registers\n", 2616 sc->sc_wdcdev.sc_dev.dv_xname); 2617 sc->sc_dma_ok = 0; 2618 } 2619 2620 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2621 WDC_CAPABILITY_MODE; 2622 if (sc->sc_dma_ok) { 2623 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2624 sc->sc_wdcdev.irqack = pciide_irqack; 2625 } 2626 sc->sc_wdcdev.PIO_cap = 4; 2627 sc->sc_wdcdev.DMA_cap = 2; 2628 sc->sc_wdcdev.set_modes = cy693_setup_channel; 2629 2630 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2631 sc->sc_wdcdev.nchannels = 1; 2632 2633 /* Only one channel for this chip; if we are here it's enabled */ 2634 cp = &sc->pciide_channels[0]; 2635 sc->wdc_chanarray[0] = &cp->wdc_channel; 2636 cp->name = PCIIDE_CHANNEL_NAME(0); 2637 cp->wdc_channel.channel = 0; 2638 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2639 cp->wdc_channel.ch_queue = 2640 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2641 if (cp->wdc_channel.ch_queue == NULL) { 2642 printf("%s primary channel: " 2643 "can't allocate memory for command queue", 2644 sc->sc_wdcdev.sc_dev.dv_xname); 2645 return; 2646 } 2647 printf("%s: primary channel %s to ", 2648 sc->sc_wdcdev.sc_dev.dv_xname, 2649 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 2650 "configured" : "wired"); 2651 if (interface & PCIIDE_INTERFACE_PCI(0)) { 2652 printf("native-PCI"); 2653 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 2654 pciide_pci_intr); 2655 } else { 2656 printf("compatibility"); 2657 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, 2658 &cmdsize, &ctlsize); 2659 } 2660 printf(" mode\n"); 2661 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2662 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2663 wdcattach(&cp->wdc_channel); 2664 if (pciide_chan_candisable(cp)) { 2665 pci_conf_write(sc->sc_pc, sc->sc_tag, 2666 PCI_COMMAND_STATUS_REG, 0); 2667 } 2668 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface); 2669 if (cp->hw_ok == 0) 2670 return; 2671 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 2672 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE); 2673 cy693_setup_channel(&cp->wdc_channel); 2674 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 2675 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 2676 } 2677 2678 void 2679 cy693_setup_channel(chp) 2680 struct channel_softc *chp; 2681 { 2682 struct ata_drive_datas *drvp; 2683 int drive; 2684 u_int32_t cy_cmd_ctrl; 2685 u_int32_t idedma_ctl; 2686 struct pciide_channel *cp = (struct pciide_channel*)chp; 2687 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2688 int dma_mode = -1; 2689 2690 cy_cmd_ctrl = idedma_ctl = 0; 2691 2692 /* setup DMA if needed */ 2693 pciide_channel_dma_setup(cp); 2694 2695 for (drive = 0; drive < 2; drive++) { 2696 drvp = &chp->ch_drive[drive]; 2697 /* If no drive, skip */ 2698 if ((drvp->drive_flags & DRIVE) == 0) 2699 continue; 2700 /* add timing values, setup DMA if needed */ 2701 if (drvp->drive_flags & DRIVE_DMA) { 2702 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2703 /* use Multiword DMA */ 2704 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 2705 dma_mode = drvp->DMA_mode; 2706 } 2707 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2708 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 2709 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2710 CY_CMD_CTRL_IOW_REC_OFF(drive)); 2711 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2712 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 2713 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2714 CY_CMD_CTRL_IOR_REC_OFF(drive)); 2715 } 2716 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 2717 chp->ch_drive[0].DMA_mode = dma_mode; 2718 chp->ch_drive[1].DMA_mode = dma_mode; 2719 2720 if (dma_mode == -1) 2721 dma_mode = 0; 2722 2723 if (sc->sc_cy_handle != NULL) { 2724 /* Note: `multiple' is implied. */ 2725 cy82c693_write(sc->sc_cy_handle, 2726 (sc->sc_cy_compatchan == 0) ? 2727 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 2728 } 2729 2730 pciide_print_modes(cp); 2731 2732 if (idedma_ctl != 0) { 2733 /* Add software bits in status register */ 2734 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2735 IDEDMA_CTL, idedma_ctl); 2736 } 2737 } 2738 2739 void 2740 sis_chip_map(sc, pa) 2741 struct pciide_softc *sc; 2742 struct pci_attach_args *pa; 2743 { 2744 struct pciide_channel *cp; 2745 int channel; 2746 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 2747 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2748 pcireg_t rev = PCI_REVISION(pa->pa_class); 2749 bus_size_t cmdsize, ctlsize; 2750 pcitag_t pchb_tag; 2751 pcireg_t pchb_id, pchb_class; 2752 2753 if (pciide_chipen(sc, pa) == 0) 2754 return; 2755 printf("%s: bus-master DMA support present", 2756 sc->sc_wdcdev.sc_dev.dv_xname); 2757 pciide_mapreg_dma(sc, pa); 2758 printf("\n"); 2759 2760 /* get a PCI tag for the host bridge (function 0 of the same device) */ 2761 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 2762 /* and read ID and rev of the ISA bridge */ 2763 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG); 2764 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG); 2765 2766 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2767 WDC_CAPABILITY_MODE; 2768 if (sc->sc_dma_ok) { 2769 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2770 sc->sc_wdcdev.irqack = pciide_irqack; 2771 /* 2772 * controllers associated to a rev 0x2 530 Host to PCI Bridge 2773 * have problems with UDMA (info provided by Christos) 2774 */ 2775 if (rev >= 0xd0 && 2776 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB || 2777 PCI_REVISION(pchb_class) >= 0x03)) 2778 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2779 } 2780 2781 sc->sc_wdcdev.PIO_cap = 4; 2782 sc->sc_wdcdev.DMA_cap = 2; 2783 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) 2784 sc->sc_wdcdev.UDMA_cap = 2; 2785 sc->sc_wdcdev.set_modes = sis_setup_channel; 2786 2787 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2788 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2789 2790 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 2791 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 2792 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE); 2793 2794 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2795 cp = &sc->pciide_channels[channel]; 2796 if (pciide_chansetup(sc, channel, interface) == 0) 2797 continue; 2798 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 2799 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 2800 printf("%s: %s channel ignored (disabled)\n", 2801 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2802 continue; 2803 } 2804 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2805 pciide_pci_intr); 2806 if (cp->hw_ok == 0) 2807 continue; 2808 if (pciide_chan_candisable(cp)) { 2809 if (channel == 0) 2810 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 2811 else 2812 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 2813 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 2814 sis_ctr0); 2815 } 2816 pciide_map_compat_intr(pa, cp, channel, interface); 2817 if (cp->hw_ok == 0) 2818 continue; 2819 sis_setup_channel(&cp->wdc_channel); 2820 } 2821 } 2822 2823 void 2824 sis_setup_channel(chp) 2825 struct channel_softc *chp; 2826 { 2827 struct ata_drive_datas *drvp; 2828 int drive; 2829 u_int32_t sis_tim; 2830 u_int32_t idedma_ctl; 2831 struct pciide_channel *cp = (struct pciide_channel*)chp; 2832 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2833 2834 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 2835 "channel %d 0x%x\n", chp->channel, 2836 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 2837 DEBUG_PROBE); 2838 sis_tim = 0; 2839 idedma_ctl = 0; 2840 /* setup DMA if needed */ 2841 pciide_channel_dma_setup(cp); 2842 2843 for (drive = 0; drive < 2; drive++) { 2844 drvp = &chp->ch_drive[drive]; 2845 /* If no drive, skip */ 2846 if ((drvp->drive_flags & DRIVE) == 0) 2847 continue; 2848 /* add timing values, setup DMA if needed */ 2849 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 2850 (drvp->drive_flags & DRIVE_UDMA) == 0) 2851 goto pio; 2852 2853 if (drvp->drive_flags & DRIVE_UDMA) { 2854 /* use Ultra/DMA */ 2855 drvp->drive_flags &= ~DRIVE_DMA; 2856 sis_tim |= sis_udma_tim[drvp->UDMA_mode] << 2857 SIS_TIM_UDMA_TIME_OFF(drive); 2858 sis_tim |= SIS_TIM_UDMA_EN(drive); 2859 } else { 2860 /* 2861 * use Multiword DMA 2862 * Timings will be used for both PIO and DMA, 2863 * so adjust DMA mode if needed 2864 */ 2865 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 2866 drvp->PIO_mode = drvp->DMA_mode + 2; 2867 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 2868 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 2869 drvp->PIO_mode - 2 : 0; 2870 if (drvp->DMA_mode == 0) 2871 drvp->PIO_mode = 0; 2872 } 2873 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2874 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] << 2875 SIS_TIM_ACT_OFF(drive); 2876 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 2877 SIS_TIM_REC_OFF(drive); 2878 } 2879 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 2880 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 2881 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 2882 if (idedma_ctl != 0) { 2883 /* Add software bits in status register */ 2884 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2885 IDEDMA_CTL, idedma_ctl); 2886 } 2887 pciide_print_modes(cp); 2888 } 2889 2890 void 2891 acer_chip_map(sc, pa) 2892 struct pciide_softc *sc; 2893 struct pci_attach_args *pa; 2894 { 2895 struct pciide_channel *cp; 2896 int channel; 2897 pcireg_t cr, interface; 2898 bus_size_t cmdsize, ctlsize; 2899 pcireg_t rev = PCI_REVISION(pa->pa_class); 2900 2901 if (pciide_chipen(sc, pa) == 0) 2902 return; 2903 printf("%s: bus-master DMA support present", 2904 sc->sc_wdcdev.sc_dev.dv_xname); 2905 pciide_mapreg_dma(sc, pa); 2906 printf("\n"); 2907 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2908 WDC_CAPABILITY_MODE; 2909 if (sc->sc_dma_ok) { 2910 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 2911 if (rev >= 0x20) { 2912 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2913 if (rev >= 0xC4) 2914 sc->sc_wdcdev.UDMA_cap = 5; 2915 else if (rev >= 0xC2) 2916 sc->sc_wdcdev.UDMA_cap = 4; 2917 else 2918 sc->sc_wdcdev.UDMA_cap = 2; 2919 } 2920 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2921 sc->sc_wdcdev.irqack = pciide_irqack; 2922 } 2923 2924 sc->sc_wdcdev.PIO_cap = 4; 2925 sc->sc_wdcdev.DMA_cap = 2; 2926 sc->sc_wdcdev.set_modes = acer_setup_channel; 2927 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2928 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2929 2930 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 2931 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 2932 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 2933 2934 /* Enable "microsoft register bits" R/W. */ 2935 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 2936 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 2937 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 2938 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 2939 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 2940 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 2941 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 2942 ~ACER_CHANSTATUSREGS_RO); 2943 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 2944 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 2945 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 2946 /* Don't use cr, re-read the real register content instead */ 2947 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 2948 PCI_CLASS_REG)); 2949 2950 /* From linux: enable "Cable Detection" */ 2951 if (rev >= 0xC2) { 2952 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 2953 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 2954 | ACER_0x4B_CDETECT); 2955 /* set south-bridge's enable bit, m1533, 0x79 */ 2956 if (rev == 0xC2) 2957 /* 1543C-B0 (m1533, 0x79, bit 2) */ 2958 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x79, 2959 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x79) 2960 | ACER_0x79_REVC2_EN); 2961 else 2962 /* 1553/1535 (m1533, 0x79, bit 1) */ 2963 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x79, 2964 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x79) 2965 | ACER_0x79_EN); 2966 } 2967 2968 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2969 cp = &sc->pciide_channels[channel]; 2970 if (pciide_chansetup(sc, channel, interface) == 0) 2971 continue; 2972 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 2973 printf("%s: %s channel ignored (disabled)\n", 2974 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2975 continue; 2976 } 2977 /* newer controllers seems to lack the ACER_CHIDS. Sigh */ 2978 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2979 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr); 2980 if (cp->hw_ok == 0) 2981 continue; 2982 if (pciide_chan_candisable(cp)) { 2983 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 2984 pci_conf_write(sc->sc_pc, sc->sc_tag, 2985 PCI_CLASS_REG, cr); 2986 } 2987 pciide_map_compat_intr(pa, cp, channel, interface); 2988 acer_setup_channel(&cp->wdc_channel); 2989 } 2990 } 2991 2992 void 2993 acer_setup_channel(chp) 2994 struct channel_softc *chp; 2995 { 2996 struct ata_drive_datas *drvp; 2997 int drive; 2998 u_int32_t acer_fifo_udma; 2999 u_int32_t idedma_ctl; 3000 struct pciide_channel *cp = (struct pciide_channel*)chp; 3001 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3002 3003 idedma_ctl = 0; 3004 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 3005 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 3006 acer_fifo_udma), DEBUG_PROBE); 3007 /* setup DMA if needed */ 3008 pciide_channel_dma_setup(cp); 3009 3010 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 3011 DRIVE_UDMA) { /* check 80 pins cable */ 3012 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 3013 ACER_0x4A_80PIN(chp->channel)) { 3014 if (chp->ch_drive[0].UDMA_mode > 2) 3015 chp->ch_drive[0].UDMA_mode = 2; 3016 if (chp->ch_drive[1].UDMA_mode > 2) 3017 chp->ch_drive[1].UDMA_mode = 2; 3018 } 3019 } 3020 3021 for (drive = 0; drive < 2; drive++) { 3022 drvp = &chp->ch_drive[drive]; 3023 /* If no drive, skip */ 3024 if ((drvp->drive_flags & DRIVE) == 0) 3025 continue; 3026 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 3027 "channel %d drive %d 0x%x\n", chp->channel, drive, 3028 pciide_pci_read(sc->sc_pc, sc->sc_tag, 3029 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 3030 /* clear FIFO/DMA mode */ 3031 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 3032 ACER_UDMA_EN(chp->channel, drive) | 3033 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 3034 3035 /* add timing values, setup DMA if needed */ 3036 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 3037 (drvp->drive_flags & DRIVE_UDMA) == 0) { 3038 acer_fifo_udma |= 3039 ACER_FTH_OPL(chp->channel, drive, 0x1); 3040 goto pio; 3041 } 3042 3043 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 3044 if (drvp->drive_flags & DRIVE_UDMA) { 3045 /* use Ultra/DMA */ 3046 drvp->drive_flags &= ~DRIVE_DMA; 3047 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 3048 acer_fifo_udma |= 3049 ACER_UDMA_TIM(chp->channel, drive, 3050 acer_udma[drvp->UDMA_mode]); 3051 /* XXX disable if one drive < UDMA3 ? */ 3052 if (drvp->UDMA_mode >= 3) { 3053 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3054 ACER_0x4B, 3055 pciide_pci_read(sc->sc_pc, sc->sc_tag, 3056 ACER_0x4B) | ACER_0x4B_UDMA66); 3057 } 3058 } else { 3059 /* 3060 * use Multiword DMA 3061 * Timings will be used for both PIO and DMA, 3062 * so adjust DMA mode if needed 3063 */ 3064 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3065 drvp->PIO_mode = drvp->DMA_mode + 2; 3066 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3067 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 3068 drvp->PIO_mode - 2 : 0; 3069 if (drvp->DMA_mode == 0) 3070 drvp->PIO_mode = 0; 3071 } 3072 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3073 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 3074 ACER_IDETIM(chp->channel, drive), 3075 acer_pio[drvp->PIO_mode]); 3076 } 3077 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 3078 acer_fifo_udma), DEBUG_PROBE); 3079 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 3080 if (idedma_ctl != 0) { 3081 /* Add software bits in status register */ 3082 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3083 IDEDMA_CTL, idedma_ctl); 3084 } 3085 pciide_print_modes(cp); 3086 } 3087 3088 int 3089 acer_pci_intr(arg) 3090 void *arg; 3091 { 3092 struct pciide_softc *sc = arg; 3093 struct pciide_channel *cp; 3094 struct channel_softc *wdc_cp; 3095 int i, rv, crv; 3096 u_int32_t chids; 3097 3098 rv = 0; 3099 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 3100 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3101 cp = &sc->pciide_channels[i]; 3102 wdc_cp = &cp->wdc_channel; 3103 /* If a compat channel skip. */ 3104 if (cp->compat) 3105 continue; 3106 if (chids & ACER_CHIDS_INT(i)) { 3107 crv = wdcintr(wdc_cp); 3108 if (crv == 0) 3109 printf("%s:%d: bogus intr\n", 3110 sc->sc_wdcdev.sc_dev.dv_xname, i); 3111 else 3112 rv = 1; 3113 } 3114 } 3115 return rv; 3116 } 3117 3118 void 3119 hpt_chip_map(sc, pa) 3120 struct pciide_softc *sc; 3121 struct pci_attach_args *pa; 3122 { 3123 struct pciide_channel *cp; 3124 int i, compatchan, revision; 3125 pcireg_t interface; 3126 bus_size_t cmdsize, ctlsize; 3127 3128 if (pciide_chipen(sc, pa) == 0) 3129 return; 3130 revision = PCI_REVISION(pa->pa_class); 3131 printf(": Triones/Highpoint "); 3132 if (revision == HPT370_REV) 3133 printf("HPT370 IDE Controller\n"); 3134 else if (revision == HPT370A_REV) 3135 printf("HPT370A IDE Controller\n"); 3136 else if (revision == HPT366_REV) 3137 printf("HPT366 IDE Controller\n"); 3138 else 3139 printf("unknown HPT IDE controller rev %d\n", revision); 3140 3141 /* 3142 * when the chip is in native mode it identifies itself as a 3143 * 'misc mass storage'. Fake interface in this case. 3144 */ 3145 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3146 interface = PCI_INTERFACE(pa->pa_class); 3147 } else { 3148 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3149 PCIIDE_INTERFACE_PCI(0); 3150 if (revision == HPT370_REV || revision == HPT370A_REV) 3151 interface |= PCIIDE_INTERFACE_PCI(1); 3152 } 3153 3154 printf("%s: bus-master DMA support present", 3155 sc->sc_wdcdev.sc_dev.dv_xname); 3156 pciide_mapreg_dma(sc, pa); 3157 printf("\n"); 3158 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3159 WDC_CAPABILITY_MODE; 3160 if (sc->sc_dma_ok) { 3161 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3162 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3163 sc->sc_wdcdev.irqack = pciide_irqack; 3164 } 3165 sc->sc_wdcdev.PIO_cap = 4; 3166 sc->sc_wdcdev.DMA_cap = 2; 3167 3168 sc->sc_wdcdev.set_modes = hpt_setup_channel; 3169 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3170 if (revision == HPT366_REV) { 3171 sc->sc_wdcdev.UDMA_cap = 4; 3172 /* 3173 * The 366 has 2 PCI IDE functions, one for primary and one 3174 * for secondary. So we need to call pciide_mapregs_compat() 3175 * with the real channel 3176 */ 3177 if (pa->pa_function == 0) { 3178 compatchan = 0; 3179 } else if (pa->pa_function == 1) { 3180 compatchan = 1; 3181 } else { 3182 printf("%s: unexpected PCI function %d\n", 3183 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 3184 return; 3185 } 3186 sc->sc_wdcdev.nchannels = 1; 3187 } else { 3188 sc->sc_wdcdev.nchannels = 2; 3189 sc->sc_wdcdev.UDMA_cap = 5; 3190 } 3191 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3192 cp = &sc->pciide_channels[i]; 3193 if (sc->sc_wdcdev.nchannels > 1) { 3194 compatchan = i; 3195 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 3196 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 3197 printf("%s: %s channel ignored (disabled)\n", 3198 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3199 continue; 3200 } 3201 } 3202 if (pciide_chansetup(sc, i, interface) == 0) 3203 continue; 3204 if (interface & PCIIDE_INTERFACE_PCI(i)) { 3205 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 3206 &ctlsize, hpt_pci_intr); 3207 } else { 3208 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 3209 &cmdsize, &ctlsize); 3210 } 3211 if (cp->hw_ok == 0) 3212 return; 3213 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 3214 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 3215 wdcattach(&cp->wdc_channel); 3216 hpt_setup_channel(&cp->wdc_channel); 3217 } 3218 if (revision == HPT370_REV || revision == HPT370A_REV) { 3219 /* 3220 * HPT370_REV has a bit to disable interrupts, make sure 3221 * to clear it 3222 */ 3223 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 3224 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 3225 ~HPT_CSEL_IRQDIS); 3226 } 3227 return; 3228 } 3229 3230 void 3231 hpt_setup_channel(chp) 3232 struct channel_softc *chp; 3233 { 3234 struct ata_drive_datas *drvp; 3235 int drive; 3236 int cable; 3237 u_int32_t before, after; 3238 u_int32_t idedma_ctl; 3239 struct pciide_channel *cp = (struct pciide_channel*)chp; 3240 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3241 3242 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 3243 3244 /* setup DMA if needed */ 3245 pciide_channel_dma_setup(cp); 3246 3247 idedma_ctl = 0; 3248 3249 /* Per drive settings */ 3250 for (drive = 0; drive < 2; drive++) { 3251 drvp = &chp->ch_drive[drive]; 3252 /* If no drive, skip */ 3253 if ((drvp->drive_flags & DRIVE) == 0) 3254 continue; 3255 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 3256 HPT_IDETIM(chp->channel, drive)); 3257 3258 /* add timing values, setup DMA if needed */ 3259 if (drvp->drive_flags & DRIVE_UDMA) { 3260 /* use Ultra/DMA */ 3261 drvp->drive_flags &= ~DRIVE_DMA; 3262 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 3263 drvp->UDMA_mode > 2) 3264 drvp->UDMA_mode = 2; 3265 after = (sc->sc_wdcdev.nchannels == 2) ? 3266 hpt370_udma[drvp->UDMA_mode] : 3267 hpt366_udma[drvp->UDMA_mode]; 3268 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3269 } else if (drvp->drive_flags & DRIVE_DMA) { 3270 /* 3271 * use Multiword DMA. 3272 * Timings will be used for both PIO and DMA, so adjust 3273 * DMA mode if needed 3274 */ 3275 if (drvp->PIO_mode >= 3 && 3276 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3277 drvp->DMA_mode = drvp->PIO_mode - 2; 3278 } 3279 after = (sc->sc_wdcdev.nchannels == 2) ? 3280 hpt370_dma[drvp->DMA_mode] : 3281 hpt366_dma[drvp->DMA_mode]; 3282 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3283 } else { 3284 /* PIO only */ 3285 after = (sc->sc_wdcdev.nchannels == 2) ? 3286 hpt370_pio[drvp->PIO_mode] : 3287 hpt366_pio[drvp->PIO_mode]; 3288 } 3289 pci_conf_write(sc->sc_pc, sc->sc_tag, 3290 HPT_IDETIM(chp->channel, drive), after); 3291 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 3292 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname, 3293 after, before), DEBUG_PROBE); 3294 } 3295 if (idedma_ctl != 0) { 3296 /* Add software bits in status register */ 3297 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3298 IDEDMA_CTL, idedma_ctl); 3299 } 3300 pciide_print_modes(cp); 3301 } 3302 3303 int 3304 hpt_pci_intr(arg) 3305 void *arg; 3306 { 3307 struct pciide_softc *sc = arg; 3308 struct pciide_channel *cp; 3309 struct channel_softc *wdc_cp; 3310 int rv = 0; 3311 int dmastat, i, crv; 3312 3313 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3314 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3315 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 3316 if((dmastat & IDEDMA_CTL_INTR) == 0) 3317 continue; 3318 cp = &sc->pciide_channels[i]; 3319 wdc_cp = &cp->wdc_channel; 3320 crv = wdcintr(wdc_cp); 3321 if (crv == 0) { 3322 printf("%s:%d: bogus intr\n", 3323 sc->sc_wdcdev.sc_dev.dv_xname, i); 3324 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3325 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 3326 } else 3327 rv = 1; 3328 } 3329 return rv; 3330 } 3331 3332 3333 /* Macros to test product */ 3334 #define PDC_IS_262(sc) \ 3335 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \ 3336 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3337 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) 3338 #define PDC_IS_265(sc) \ 3339 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3340 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) 3341 3342 void 3343 pdc202xx_chip_map(sc, pa) 3344 struct pciide_softc *sc; 3345 struct pci_attach_args *pa; 3346 { 3347 struct pciide_channel *cp; 3348 int channel; 3349 pcireg_t interface, st, mode; 3350 bus_size_t cmdsize, ctlsize; 3351 3352 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3353 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st), 3354 DEBUG_PROBE); 3355 if (pciide_chipen(sc, pa) == 0) 3356 return; 3357 3358 /* turn off RAID mode */ 3359 st &= ~PDC2xx_STATE_IDERAID; 3360 3361 /* 3362 * can't rely on the PCI_CLASS_REG content if the chip was in raid 3363 * mode. We have to fake interface 3364 */ 3365 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 3366 if (st & PDC2xx_STATE_NATIVE) 3367 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3368 3369 printf("%s: bus-master DMA support present", 3370 sc->sc_wdcdev.sc_dev.dv_xname); 3371 pciide_mapreg_dma(sc, pa); 3372 printf("\n"); 3373 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3374 WDC_CAPABILITY_MODE; 3375 if (sc->sc_dma_ok) { 3376 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3377 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3378 sc->sc_wdcdev.irqack = pciide_irqack; 3379 } 3380 sc->sc_wdcdev.PIO_cap = 4; 3381 sc->sc_wdcdev.DMA_cap = 2; 3382 if (PDC_IS_265(sc)) 3383 sc->sc_wdcdev.UDMA_cap = 5; 3384 else if (PDC_IS_262(sc)) 3385 sc->sc_wdcdev.UDMA_cap = 4; 3386 else 3387 sc->sc_wdcdev.UDMA_cap = 2; 3388 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel; 3389 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3390 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3391 3392 /* setup failsafe defaults */ 3393 mode = 0; 3394 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 3395 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 3396 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 3397 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 3398 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3399 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 " 3400 "initial timings 0x%x, now 0x%x\n", channel, 3401 pci_conf_read(sc->sc_pc, sc->sc_tag, 3402 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 3403 DEBUG_PROBE); 3404 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0), 3405 mode | PDC2xx_TIM_IORDYp); 3406 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 " 3407 "initial timings 0x%x, now 0x%x\n", channel, 3408 pci_conf_read(sc->sc_pc, sc->sc_tag, 3409 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 3410 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1), 3411 mode); 3412 } 3413 3414 mode = PDC2xx_SCR_DMA; 3415 if (PDC_IS_262(sc)) { 3416 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 3417 } else { 3418 /* the BIOS set it up this way */ 3419 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 3420 } 3421 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 3422 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 3423 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n", 3424 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode), 3425 DEBUG_PROBE); 3426 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode); 3427 3428 /* controller initial state register is OK even without BIOS */ 3429 /* Set DMA mode to IDE DMA compatibility */ 3430 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 3431 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ), 3432 DEBUG_PROBE); 3433 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 3434 mode | 0x1); 3435 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 3436 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 3437 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 3438 mode | 0x1); 3439 3440 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3441 cp = &sc->pciide_channels[channel]; 3442 if (pciide_chansetup(sc, channel, interface) == 0) 3443 continue; 3444 if ((st & (PDC_IS_262(sc) ? 3445 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 3446 printf("%s: %s channel ignored (disabled)\n", 3447 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3448 continue; 3449 } 3450 if (PDC_IS_265(sc)) 3451 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3452 pdc20265_pci_intr); 3453 else 3454 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3455 pdc202xx_pci_intr); 3456 if (cp->hw_ok == 0) 3457 continue; 3458 if (pciide_chan_candisable(cp)) 3459 st &= ~(PDC_IS_262(sc) ? 3460 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 3461 pciide_map_compat_intr(pa, cp, channel, interface); 3462 pdc202xx_setup_channel(&cp->wdc_channel); 3463 } 3464 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st), 3465 DEBUG_PROBE); 3466 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 3467 return; 3468 } 3469 3470 void 3471 pdc202xx_setup_channel(chp) 3472 struct channel_softc *chp; 3473 { 3474 struct ata_drive_datas *drvp; 3475 int drive; 3476 pcireg_t mode, st; 3477 u_int32_t idedma_ctl, scr, atapi; 3478 struct pciide_channel *cp = (struct pciide_channel*)chp; 3479 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3480 int channel = chp->channel; 3481 3482 /* setup DMA if needed */ 3483 pciide_channel_dma_setup(cp); 3484 3485 idedma_ctl = 0; 3486 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 3487 sc->sc_wdcdev.sc_dev.dv_xname, 3488 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 3489 DEBUG_PROBE); 3490 3491 /* Per channel settings */ 3492 if (PDC_IS_262(sc)) { 3493 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3494 PDC262_U66); 3495 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3496 /* Trimm UDMA mode */ 3497 if ((st & PDC262_STATE_80P(channel)) != 0 || 3498 (chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3499 chp->ch_drive[0].UDMA_mode <= 2) || 3500 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3501 chp->ch_drive[1].UDMA_mode <= 2)) { 3502 if (chp->ch_drive[0].UDMA_mode > 2) 3503 chp->ch_drive[0].UDMA_mode = 2; 3504 if (chp->ch_drive[1].UDMA_mode > 2) 3505 chp->ch_drive[1].UDMA_mode = 2; 3506 } 3507 /* Set U66 if needed */ 3508 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3509 chp->ch_drive[0].UDMA_mode > 2) || 3510 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3511 chp->ch_drive[1].UDMA_mode > 2)) 3512 scr |= PDC262_U66_EN(channel); 3513 else 3514 scr &= ~PDC262_U66_EN(channel); 3515 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3516 PDC262_U66, scr); 3517 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 3518 sc->sc_wdcdev.sc_dev.dv_xname, channel, 3519 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3520 PDC262_ATAPI(channel))), DEBUG_PROBE); 3521 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 3522 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 3523 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3524 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3525 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 3526 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3527 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3528 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 3529 atapi = 0; 3530 else 3531 atapi = PDC262_ATAPI_UDMA; 3532 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3533 PDC262_ATAPI(channel), atapi); 3534 } 3535 } 3536 for (drive = 0; drive < 2; drive++) { 3537 drvp = &chp->ch_drive[drive]; 3538 /* If no drive, skip */ 3539 if ((drvp->drive_flags & DRIVE) == 0) 3540 continue; 3541 mode = 0; 3542 if (drvp->drive_flags & DRIVE_UDMA) { 3543 /* use Ultra/DMA */ 3544 drvp->drive_flags &= ~DRIVE_DMA; 3545 mode = PDC2xx_TIM_SET_MB(mode, 3546 pdc2xx_udma_mb[drvp->UDMA_mode]); 3547 mode = PDC2xx_TIM_SET_MC(mode, 3548 pdc2xx_udma_mc[drvp->UDMA_mode]); 3549 drvp->drive_flags &= ~DRIVE_DMA; 3550 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3551 } else if (drvp->drive_flags & DRIVE_DMA) { 3552 mode = PDC2xx_TIM_SET_MB(mode, 3553 pdc2xx_dma_mb[drvp->DMA_mode]); 3554 mode = PDC2xx_TIM_SET_MC(mode, 3555 pdc2xx_dma_mc[drvp->DMA_mode]); 3556 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3557 } else { 3558 mode = PDC2xx_TIM_SET_MB(mode, 3559 pdc2xx_dma_mb[0]); 3560 mode = PDC2xx_TIM_SET_MC(mode, 3561 pdc2xx_dma_mc[0]); 3562 } 3563 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 3564 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 3565 if (drvp->drive_flags & DRIVE_ATA) 3566 mode |= PDC2xx_TIM_PRE; 3567 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 3568 if (drvp->PIO_mode >= 3) { 3569 mode |= PDC2xx_TIM_IORDY; 3570 if (drive == 0) 3571 mode |= PDC2xx_TIM_IORDYp; 3572 } 3573 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 3574 "timings 0x%x\n", 3575 sc->sc_wdcdev.sc_dev.dv_xname, 3576 chp->channel, drive, mode), DEBUG_PROBE); 3577 pci_conf_write(sc->sc_pc, sc->sc_tag, 3578 PDC2xx_TIM(chp->channel, drive), mode); 3579 } 3580 if (idedma_ctl != 0) { 3581 /* Add software bits in status register */ 3582 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3583 IDEDMA_CTL, idedma_ctl); 3584 } 3585 pciide_print_modes(cp); 3586 } 3587 3588 int 3589 pdc202xx_pci_intr(arg) 3590 void *arg; 3591 { 3592 struct pciide_softc *sc = arg; 3593 struct pciide_channel *cp; 3594 struct channel_softc *wdc_cp; 3595 int i, rv, crv; 3596 u_int32_t scr; 3597 3598 rv = 0; 3599 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 3600 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3601 cp = &sc->pciide_channels[i]; 3602 wdc_cp = &cp->wdc_channel; 3603 /* If a compat channel skip. */ 3604 if (cp->compat) 3605 continue; 3606 if (scr & PDC2xx_SCR_INT(i)) { 3607 crv = wdcintr(wdc_cp); 3608 if (crv == 0) 3609 printf("%s:%d: bogus intr (reg 0x%x)\n", 3610 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 3611 else 3612 rv = 1; 3613 } 3614 } 3615 return rv; 3616 } 3617 3618 int 3619 pdc20265_pci_intr(arg) 3620 void *arg; 3621 { 3622 struct pciide_softc *sc = arg; 3623 struct pciide_channel *cp; 3624 struct channel_softc *wdc_cp; 3625 int i, rv, crv; 3626 u_int32_t dmastat; 3627 3628 rv = 0; 3629 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3630 cp = &sc->pciide_channels[i]; 3631 wdc_cp = &cp->wdc_channel; 3632 /* If a compat channel skip. */ 3633 if (cp->compat) 3634 continue; 3635 /* 3636 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 3637 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 3638 * So use it instead (requires 2 reg reads instead of 1, 3639 * but we can't do it another way). 3640 */ 3641 dmastat = bus_space_read_1(sc->sc_dma_iot, 3642 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 3643 if((dmastat & IDEDMA_CTL_INTR) == 0) 3644 continue; 3645 crv = wdcintr(wdc_cp); 3646 if (crv == 0) 3647 printf("%s:%d: bogus intr\n", 3648 sc->sc_wdcdev.sc_dev.dv_xname, i); 3649 else 3650 rv = 1; 3651 } 3652 return rv; 3653 } 3654 3655 void 3656 opti_chip_map(sc, pa) 3657 struct pciide_softc *sc; 3658 struct pci_attach_args *pa; 3659 { 3660 struct pciide_channel *cp; 3661 bus_size_t cmdsize, ctlsize; 3662 pcireg_t interface; 3663 u_int8_t init_ctrl; 3664 int channel; 3665 3666 if (pciide_chipen(sc, pa) == 0) 3667 return; 3668 printf("%s: bus-master DMA support present", 3669 sc->sc_wdcdev.sc_dev.dv_xname); 3670 3671 /* 3672 * XXXSCW: 3673 * There seem to be a couple of buggy revisions/implementations 3674 * of the OPTi pciide chipset. This kludge seems to fix one of 3675 * the reported problems (PR/11644) but still fails for the 3676 * other (PR/13151), although the latter may be due to other 3677 * issues too... 3678 */ 3679 if (PCI_REVISION(pa->pa_class) <= 0x12) { 3680 printf(" but disabled due to chip rev. <= 0x12"); 3681 sc->sc_dma_ok = 0; 3682 sc->sc_wdcdev.cap = 0; 3683 } else { 3684 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32; 3685 pciide_mapreg_dma(sc, pa); 3686 } 3687 printf("\n"); 3688 3689 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE; 3690 sc->sc_wdcdev.PIO_cap = 4; 3691 if (sc->sc_dma_ok) { 3692 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3693 sc->sc_wdcdev.irqack = pciide_irqack; 3694 sc->sc_wdcdev.DMA_cap = 2; 3695 } 3696 sc->sc_wdcdev.set_modes = opti_setup_channel; 3697 3698 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3699 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3700 3701 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 3702 OPTI_REG_INIT_CONTROL); 3703 3704 interface = PCI_INTERFACE(pa->pa_class); 3705 3706 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3707 cp = &sc->pciide_channels[channel]; 3708 if (pciide_chansetup(sc, channel, interface) == 0) 3709 continue; 3710 if (channel == 1 && 3711 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 3712 printf("%s: %s channel ignored (disabled)\n", 3713 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3714 continue; 3715 } 3716 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3717 pciide_pci_intr); 3718 if (cp->hw_ok == 0) 3719 continue; 3720 pciide_map_compat_intr(pa, cp, channel, interface); 3721 if (cp->hw_ok == 0) 3722 continue; 3723 opti_setup_channel(&cp->wdc_channel); 3724 } 3725 } 3726 3727 void 3728 opti_setup_channel(chp) 3729 struct channel_softc *chp; 3730 { 3731 struct ata_drive_datas *drvp; 3732 struct pciide_channel *cp = (struct pciide_channel*)chp; 3733 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3734 int drive, spd; 3735 int mode[2]; 3736 u_int8_t rv, mr; 3737 3738 /* 3739 * The `Delay' and `Address Setup Time' fields of the 3740 * Miscellaneous Register are always zero initially. 3741 */ 3742 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 3743 mr &= ~(OPTI_MISC_DELAY_MASK | 3744 OPTI_MISC_ADDR_SETUP_MASK | 3745 OPTI_MISC_INDEX_MASK); 3746 3747 /* Prime the control register before setting timing values */ 3748 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 3749 3750 /* Determine the clockrate of the PCIbus the chip is attached to */ 3751 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 3752 spd &= OPTI_STRAP_PCI_SPEED_MASK; 3753 3754 /* setup DMA if needed */ 3755 pciide_channel_dma_setup(cp); 3756 3757 for (drive = 0; drive < 2; drive++) { 3758 drvp = &chp->ch_drive[drive]; 3759 /* If no drive, skip */ 3760 if ((drvp->drive_flags & DRIVE) == 0) { 3761 mode[drive] = -1; 3762 continue; 3763 } 3764 3765 if ((drvp->drive_flags & DRIVE_DMA)) { 3766 /* 3767 * Timings will be used for both PIO and DMA, 3768 * so adjust DMA mode if needed 3769 */ 3770 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3771 drvp->PIO_mode = drvp->DMA_mode + 2; 3772 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3773 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 3774 drvp->PIO_mode - 2 : 0; 3775 if (drvp->DMA_mode == 0) 3776 drvp->PIO_mode = 0; 3777 3778 mode[drive] = drvp->DMA_mode + 5; 3779 } else 3780 mode[drive] = drvp->PIO_mode; 3781 3782 if (drive && mode[0] >= 0 && 3783 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 3784 /* 3785 * Can't have two drives using different values 3786 * for `Address Setup Time'. 3787 * Slow down the faster drive to compensate. 3788 */ 3789 int d = (opti_tim_as[spd][mode[0]] > 3790 opti_tim_as[spd][mode[1]]) ? 0 : 1; 3791 3792 mode[d] = mode[1-d]; 3793 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 3794 chp->ch_drive[d].DMA_mode = 0; 3795 chp->ch_drive[d].drive_flags &= DRIVE_DMA; 3796 } 3797 } 3798 3799 for (drive = 0; drive < 2; drive++) { 3800 int m; 3801 if ((m = mode[drive]) < 0) 3802 continue; 3803 3804 /* Set the Address Setup Time and select appropriate index */ 3805 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 3806 rv |= OPTI_MISC_INDEX(drive); 3807 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 3808 3809 /* Set the pulse width and recovery timing parameters */ 3810 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 3811 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 3812 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 3813 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 3814 3815 /* Set the Enhanced Mode register appropriately */ 3816 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 3817 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 3818 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 3819 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 3820 } 3821 3822 /* Finally, enable the timings */ 3823 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 3824 3825 pciide_print_modes(cp); 3826 } 3827 3828 #define ACARD_IS_850(sc) \ 3829 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 3830 3831 void 3832 acard_chip_map(sc, pa) 3833 struct pciide_softc *sc; 3834 struct pci_attach_args *pa; 3835 { 3836 struct pciide_channel *cp; 3837 int i; 3838 pcireg_t interface; 3839 bus_size_t cmdsize, ctlsize; 3840 3841 if (pciide_chipen(sc, pa) == 0) 3842 return; 3843 3844 /* 3845 * when the chip is in native mode it identifies itself as a 3846 * 'misc mass storage'. Fake interface in this case. 3847 */ 3848 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3849 interface = PCI_INTERFACE(pa->pa_class); 3850 } else { 3851 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3852 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3853 } 3854 3855 printf("%s: bus-master DMA support present", 3856 sc->sc_wdcdev.sc_dev.dv_xname); 3857 pciide_mapreg_dma(sc, pa); 3858 printf("\n"); 3859 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3860 WDC_CAPABILITY_MODE; 3861 3862 if (sc->sc_dma_ok) { 3863 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3864 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3865 sc->sc_wdcdev.irqack = pciide_irqack; 3866 } 3867 sc->sc_wdcdev.PIO_cap = 4; 3868 sc->sc_wdcdev.DMA_cap = 2; 3869 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4; 3870 3871 sc->sc_wdcdev.set_modes = acard_setup_channel; 3872 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3873 sc->sc_wdcdev.nchannels = 2; 3874 3875 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3876 cp = &sc->pciide_channels[i]; 3877 if (pciide_chansetup(sc, i, interface) == 0) 3878 continue; 3879 if (interface & PCIIDE_INTERFACE_PCI(i)) { 3880 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 3881 &ctlsize, pciide_pci_intr); 3882 } else { 3883 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 3884 &cmdsize, &ctlsize); 3885 } 3886 if (cp->hw_ok == 0) 3887 return; 3888 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 3889 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 3890 wdcattach(&cp->wdc_channel); 3891 acard_setup_channel(&cp->wdc_channel); 3892 } 3893 if (!ACARD_IS_850(sc)) { 3894 u_int32_t reg; 3895 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 3896 reg &= ~ATP860_CTRL_INT; 3897 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 3898 } 3899 } 3900 3901 void 3902 acard_setup_channel(chp) 3903 struct channel_softc *chp; 3904 { 3905 struct ata_drive_datas *drvp; 3906 struct pciide_channel *cp = (struct pciide_channel*)chp; 3907 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3908 int channel = chp->channel; 3909 int drive; 3910 u_int32_t idetime, udma_mode; 3911 u_int32_t idedma_ctl; 3912 3913 /* setup DMA if needed */ 3914 pciide_channel_dma_setup(cp); 3915 3916 if (ACARD_IS_850(sc)) { 3917 idetime = 0; 3918 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 3919 udma_mode &= ~ATP850_UDMA_MASK(channel); 3920 } else { 3921 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 3922 idetime &= ~ATP860_SETTIME_MASK(channel); 3923 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 3924 udma_mode &= ~ATP860_UDMA_MASK(channel); 3925 } 3926 3927 idedma_ctl = 0; 3928 3929 /* Per drive settings */ 3930 for (drive = 0; drive < 2; drive++) { 3931 drvp = &chp->ch_drive[drive]; 3932 /* If no drive, skip */ 3933 if ((drvp->drive_flags & DRIVE) == 0) 3934 continue; 3935 /* add timing values, setup DMA if needed */ 3936 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3937 (drvp->drive_flags & DRIVE_UDMA)) { 3938 /* use Ultra/DMA */ 3939 if (ACARD_IS_850(sc)) { 3940 idetime |= ATP850_SETTIME(drive, 3941 acard_act_udma[drvp->UDMA_mode], 3942 acard_rec_udma[drvp->UDMA_mode]); 3943 udma_mode |= ATP850_UDMA_MODE(channel, drive, 3944 acard_udma_conf[drvp->UDMA_mode]); 3945 } else { 3946 idetime |= ATP860_SETTIME(channel, drive, 3947 acard_act_udma[drvp->UDMA_mode], 3948 acard_rec_udma[drvp->UDMA_mode]); 3949 udma_mode |= ATP860_UDMA_MODE(channel, drive, 3950 acard_udma_conf[drvp->UDMA_mode]); 3951 } 3952 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3953 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 3954 (drvp->drive_flags & DRIVE_DMA)) { 3955 /* use Multiword DMA */ 3956 drvp->drive_flags &= ~DRIVE_UDMA; 3957 if (ACARD_IS_850(sc)) { 3958 idetime |= ATP850_SETTIME(drive, 3959 acard_act_dma[drvp->DMA_mode], 3960 acard_rec_dma[drvp->DMA_mode]); 3961 } else { 3962 idetime |= ATP860_SETTIME(channel, drive, 3963 acard_act_dma[drvp->DMA_mode], 3964 acard_rec_dma[drvp->DMA_mode]); 3965 } 3966 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3967 } else { 3968 /* PIO only */ 3969 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 3970 if (ACARD_IS_850(sc)) { 3971 idetime |= ATP850_SETTIME(drive, 3972 acard_act_pio[drvp->PIO_mode], 3973 acard_rec_pio[drvp->PIO_mode]); 3974 } else { 3975 idetime |= ATP860_SETTIME(channel, drive, 3976 acard_act_pio[drvp->PIO_mode], 3977 acard_rec_pio[drvp->PIO_mode]); 3978 } 3979 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 3980 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 3981 | ATP8x0_CTRL_EN(channel)); 3982 } 3983 } 3984 3985 if (idedma_ctl != 0) { 3986 /* Add software bits in status register */ 3987 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3988 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl); 3989 } 3990 pciide_print_modes(cp); 3991 3992 if (ACARD_IS_850(sc)) { 3993 pci_conf_write(sc->sc_pc, sc->sc_tag, 3994 ATP850_IDETIME(channel), idetime); 3995 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 3996 } else { 3997 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 3998 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 3999 } 4000 } 4001 4002 int 4003 acard_pci_intr(arg) 4004 void *arg; 4005 { 4006 struct pciide_softc *sc = arg; 4007 struct pciide_channel *cp; 4008 struct channel_softc *wdc_cp; 4009 int rv = 0; 4010 int dmastat, i, crv; 4011 4012 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4013 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4014 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 4015 if ((dmastat & IDEDMA_CTL_INTR) == 0) 4016 continue; 4017 cp = &sc->pciide_channels[i]; 4018 wdc_cp = &cp->wdc_channel; 4019 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) { 4020 (void)wdcintr(wdc_cp); 4021 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4022 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 4023 continue; 4024 } 4025 crv = wdcintr(wdc_cp); 4026 if (crv == 0) 4027 printf("%s:%d: bogus intr\n", 4028 sc->sc_wdcdev.sc_dev.dv_xname, i); 4029 else if (crv == 1) 4030 rv = 1; 4031 else if (rv == 0) 4032 rv = crv; 4033 } 4034 return rv; 4035 } 4036