1 /* $NetBSD: pciide.c,v 1.154 2002/06/01 18:07:42 bouyer Exp $ */ 2 3 4 /* 5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Manuel Bouyer. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 */ 34 35 36 /* 37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed by Christopher G. Demetriou 50 * for the NetBSD Project. 51 * 4. The name of the author may not be used to endorse or promote products 52 * derived from this software without specific prior written permission 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 64 */ 65 66 /* 67 * PCI IDE controller driver. 68 * 69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 70 * sys/dev/pci/ppb.c, revision 1.16). 71 * 72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 74 * 5/16/94" from the PCI SIG. 75 * 76 */ 77 78 #include <sys/cdefs.h> 79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.154 2002/06/01 18:07:42 bouyer Exp $"); 80 81 #ifndef WDCDEBUG 82 #define WDCDEBUG 83 #endif 84 85 #define DEBUG_DMA 0x01 86 #define DEBUG_XFERS 0x02 87 #define DEBUG_FUNCS 0x08 88 #define DEBUG_PROBE 0x10 89 #ifdef WDCDEBUG 90 int wdcdebug_pciide_mask = 0; 91 #define WDCDEBUG_PRINT(args, level) \ 92 if (wdcdebug_pciide_mask & (level)) printf args 93 #else 94 #define WDCDEBUG_PRINT(args, level) 95 #endif 96 #include <sys/param.h> 97 #include <sys/systm.h> 98 #include <sys/device.h> 99 #include <sys/malloc.h> 100 101 #include <uvm/uvm_extern.h> 102 103 #include <machine/endian.h> 104 105 #include <dev/pci/pcireg.h> 106 #include <dev/pci/pcivar.h> 107 #include <dev/pci/pcidevs.h> 108 #include <dev/pci/pciidereg.h> 109 #include <dev/pci/pciidevar.h> 110 #include <dev/pci/pciide_piix_reg.h> 111 #include <dev/pci/pciide_amd_reg.h> 112 #include <dev/pci/pciide_apollo_reg.h> 113 #include <dev/pci/pciide_cmd_reg.h> 114 #include <dev/pci/pciide_cy693_reg.h> 115 #include <dev/pci/pciide_sis_reg.h> 116 #include <dev/pci/pciide_acer_reg.h> 117 #include <dev/pci/pciide_pdc202xx_reg.h> 118 #include <dev/pci/pciide_opti_reg.h> 119 #include <dev/pci/pciide_hpt_reg.h> 120 #include <dev/pci/pciide_acard_reg.h> 121 #include <dev/pci/pciide_sl82c105_reg.h> 122 #include <dev/pci/cy82c693var.h> 123 124 #include "opt_pciide.h" 125 126 /* inlines for reading/writing 8-bit PCI registers */ 127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t, 128 int)); 129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t, 130 int, u_int8_t)); 131 132 static __inline u_int8_t 133 pciide_pci_read(pc, pa, reg) 134 pci_chipset_tag_t pc; 135 pcitag_t pa; 136 int reg; 137 { 138 139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >> 140 ((reg & 0x03) * 8) & 0xff); 141 } 142 143 static __inline void 144 pciide_pci_write(pc, pa, reg, val) 145 pci_chipset_tag_t pc; 146 pcitag_t pa; 147 int reg; 148 u_int8_t val; 149 { 150 pcireg_t pcival; 151 152 pcival = pci_conf_read(pc, pa, (reg & ~0x03)); 153 pcival &= ~(0xff << ((reg & 0x03) * 8)); 154 pcival |= (val << ((reg & 0x03) * 8)); 155 pci_conf_write(pc, pa, (reg & ~0x03), pcival); 156 } 157 158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 159 160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 161 void piix_setup_channel __P((struct channel_softc*)); 162 void piix3_4_setup_channel __P((struct channel_softc*)); 163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t)); 164 static u_int32_t piix_setup_idetim_drvs __P((struct 
ata_drive_datas*)); 165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t)); 166 167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 168 void amd7x6_setup_channel __P((struct channel_softc*)); 169 170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 171 void apollo_setup_channel __P((struct channel_softc*)); 172 173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 175 void cmd0643_9_setup_channel __P((struct channel_softc*)); 176 void cmd_channel_map __P((struct pci_attach_args *, 177 struct pciide_softc *, int)); 178 int cmd_pci_intr __P((void *)); 179 void cmd646_9_irqack __P((struct channel_softc *)); 180 181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 182 void cy693_setup_channel __P((struct channel_softc*)); 183 184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 185 void sis_setup_channel __P((struct channel_softc*)); 186 static int sis_hostbr_match __P(( struct pci_attach_args *)); 187 188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 189 void acer_setup_channel __P((struct channel_softc*)); 190 int acer_pci_intr __P((void *)); 191 192 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 193 void pdc202xx_setup_channel __P((struct channel_softc*)); 194 void pdc20268_setup_channel __P((struct channel_softc*)); 195 int pdc202xx_pci_intr __P((void *)); 196 int pdc20265_pci_intr __P((void *)); 197 198 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 199 void opti_setup_channel __P((struct channel_softc*)); 200 201 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 202 void hpt_setup_channel __P((struct channel_softc*)); 203 int hpt_pci_intr __P((void *)); 204 205 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 206 void acard_setup_channel __P((struct channel_softc*)); 207 int acard_pci_intr __P((void *)); 208 209 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 210 void serverworks_setup_channel __P((struct channel_softc*)); 211 int serverworks_pci_intr __P((void *)); 212 213 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 214 void sl82c105_setup_channel __P((struct channel_softc*)); 215 216 void pciide_channel_dma_setup __P((struct pciide_channel *)); 217 int pciide_dma_table_setup __P((struct pciide_softc*, int, int)); 218 int pciide_dma_init __P((void*, int, int, void *, size_t, int)); 219 void pciide_dma_start __P((void*, int, int)); 220 int pciide_dma_finish __P((void*, int, int, int)); 221 void pciide_irqack __P((struct channel_softc *)); 222 void pciide_print_modes __P((struct pciide_channel *)); 223 224 struct pciide_product_desc { 225 u_int32_t ide_product; 226 int ide_flags; 227 const char *ide_name; 228 /* map and setup chip, probe drives */ 229 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*)); 230 }; 231 232 /* Flags for ide_flags */ 233 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 234 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 235 236 /* Default product description for devices not known from this controller */ 237 const struct pciide_product_desc default_product_desc = { 238 0, 239 0, 240 "Generic PCI IDE controller", 241 default_chip_map, 242 }; 243 244 const struct 
pciide_product_desc pciide_intel_products[] = { 245 { PCI_PRODUCT_INTEL_82092AA, 246 0, 247 "Intel 82092AA IDE controller", 248 default_chip_map, 249 }, 250 { PCI_PRODUCT_INTEL_82371FB_IDE, 251 0, 252 "Intel 82371FB IDE controller (PIIX)", 253 piix_chip_map, 254 }, 255 { PCI_PRODUCT_INTEL_82371SB_IDE, 256 0, 257 "Intel 82371SB IDE Interface (PIIX3)", 258 piix_chip_map, 259 }, 260 { PCI_PRODUCT_INTEL_82371AB_IDE, 261 0, 262 "Intel 82371AB IDE controller (PIIX4)", 263 piix_chip_map, 264 }, 265 { PCI_PRODUCT_INTEL_82440MX_IDE, 266 0, 267 "Intel 82440MX IDE controller", 268 piix_chip_map 269 }, 270 { PCI_PRODUCT_INTEL_82801AA_IDE, 271 0, 272 "Intel 82801AA IDE Controller (ICH)", 273 piix_chip_map, 274 }, 275 { PCI_PRODUCT_INTEL_82801AB_IDE, 276 0, 277 "Intel 82801AB IDE Controller (ICH0)", 278 piix_chip_map, 279 }, 280 { PCI_PRODUCT_INTEL_82801BA_IDE, 281 0, 282 "Intel 82801BA IDE Controller (ICH2)", 283 piix_chip_map, 284 }, 285 { PCI_PRODUCT_INTEL_82801BAM_IDE, 286 0, 287 "Intel 82801BAM IDE Controller (ICH2)", 288 piix_chip_map, 289 }, 290 { PCI_PRODUCT_INTEL_82801CA_IDE_1, 291 0, 292 "Intel 82201CA IDE Controller", 293 piix_chip_map, 294 }, 295 { PCI_PRODUCT_INTEL_82801CA_IDE_2, 296 0, 297 "Intel 82201CA IDE Controller", 298 piix_chip_map, 299 }, 300 { 0, 301 0, 302 NULL, 303 NULL 304 } 305 }; 306 307 const struct pciide_product_desc pciide_amd_products[] = { 308 { PCI_PRODUCT_AMD_PBC756_IDE, 309 0, 310 "Advanced Micro Devices AMD756 IDE Controller", 311 amd7x6_chip_map 312 }, 313 { PCI_PRODUCT_AMD_PBC766_IDE, 314 0, 315 "Advanced Micro Devices AMD766 IDE Controller", 316 amd7x6_chip_map 317 }, 318 { PCI_PRODUCT_AMD_PBC768_IDE, 319 0, 320 "Advanced Micro Devices AMD768 IDE Controller", 321 amd7x6_chip_map 322 }, 323 { 0, 324 0, 325 NULL, 326 NULL 327 } 328 }; 329 330 const struct pciide_product_desc pciide_cmd_products[] = { 331 { PCI_PRODUCT_CMDTECH_640, 332 0, 333 "CMD Technology PCI0640", 334 cmd_chip_map 335 }, 336 { PCI_PRODUCT_CMDTECH_643, 337 0, 338 "CMD Technology PCI0643", 339 cmd0643_9_chip_map, 340 }, 341 { PCI_PRODUCT_CMDTECH_646, 342 0, 343 "CMD Technology PCI0646", 344 cmd0643_9_chip_map, 345 }, 346 { PCI_PRODUCT_CMDTECH_648, 347 IDE_PCI_CLASS_OVERRIDE, 348 "CMD Technology PCI0648", 349 cmd0643_9_chip_map, 350 }, 351 { PCI_PRODUCT_CMDTECH_649, 352 IDE_PCI_CLASS_OVERRIDE, 353 "CMD Technology PCI0649", 354 cmd0643_9_chip_map, 355 }, 356 { 0, 357 0, 358 NULL, 359 NULL 360 } 361 }; 362 363 const struct pciide_product_desc pciide_via_products[] = { 364 { PCI_PRODUCT_VIATECH_VT82C586_IDE, 365 0, 366 NULL, 367 apollo_chip_map, 368 }, 369 { PCI_PRODUCT_VIATECH_VT82C586A_IDE, 370 0, 371 NULL, 372 apollo_chip_map, 373 }, 374 { 0, 375 0, 376 NULL, 377 NULL 378 } 379 }; 380 381 const struct pciide_product_desc pciide_cypress_products[] = { 382 { PCI_PRODUCT_CONTAQ_82C693, 383 IDE_16BIT_IOSPACE, 384 "Cypress 82C693 IDE Controller", 385 cy693_chip_map, 386 }, 387 { 0, 388 0, 389 NULL, 390 NULL 391 } 392 }; 393 394 const struct pciide_product_desc pciide_sis_products[] = { 395 { PCI_PRODUCT_SIS_5597_IDE, 396 0, 397 "Silicon Integrated System 5597/5598 IDE controller", 398 sis_chip_map, 399 }, 400 { 0, 401 0, 402 NULL, 403 NULL 404 } 405 }; 406 407 const struct pciide_product_desc pciide_acer_products[] = { 408 { PCI_PRODUCT_ALI_M5229, 409 0, 410 "Acer Labs M5229 UDMA IDE Controller", 411 acer_chip_map, 412 }, 413 { 0, 414 0, 415 NULL, 416 NULL 417 } 418 }; 419 420 const struct pciide_product_desc pciide_promise_products[] = { 421 { PCI_PRODUCT_PROMISE_ULTRA33, 422 IDE_PCI_CLASS_OVERRIDE, 
423 "Promise Ultra33/ATA Bus Master IDE Accelerator", 424 pdc202xx_chip_map, 425 }, 426 { PCI_PRODUCT_PROMISE_ULTRA66, 427 IDE_PCI_CLASS_OVERRIDE, 428 "Promise Ultra66/ATA Bus Master IDE Accelerator", 429 pdc202xx_chip_map, 430 }, 431 { PCI_PRODUCT_PROMISE_ULTRA100, 432 IDE_PCI_CLASS_OVERRIDE, 433 "Promise Ultra100/ATA Bus Master IDE Accelerator", 434 pdc202xx_chip_map, 435 }, 436 { PCI_PRODUCT_PROMISE_ULTRA100X, 437 IDE_PCI_CLASS_OVERRIDE, 438 "Promise Ultra100/ATA Bus Master IDE Accelerator", 439 pdc202xx_chip_map, 440 }, 441 { PCI_PRODUCT_PROMISE_ULTRA100TX2, 442 IDE_PCI_CLASS_OVERRIDE, 443 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator", 444 pdc202xx_chip_map, 445 }, 446 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2, 447 IDE_PCI_CLASS_OVERRIDE, 448 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator", 449 pdc202xx_chip_map, 450 }, 451 { PCI_PRODUCT_PROMISE_ULTRA133, 452 IDE_PCI_CLASS_OVERRIDE, 453 "Promise Ultra133/ATA Bus Master IDE Accelerator", 454 pdc202xx_chip_map, 455 }, 456 { 0, 457 0, 458 NULL, 459 NULL 460 } 461 }; 462 463 const struct pciide_product_desc pciide_opti_products[] = { 464 { PCI_PRODUCT_OPTI_82C621, 465 0, 466 "OPTi 82c621 PCI IDE controller", 467 opti_chip_map, 468 }, 469 { PCI_PRODUCT_OPTI_82C568, 470 0, 471 "OPTi 82c568 (82c621 compatible) PCI IDE controller", 472 opti_chip_map, 473 }, 474 { PCI_PRODUCT_OPTI_82D568, 475 0, 476 "OPTi 82d568 (82c621 compatible) PCI IDE controller", 477 opti_chip_map, 478 }, 479 { 0, 480 0, 481 NULL, 482 NULL 483 } 484 }; 485 486 const struct pciide_product_desc pciide_triones_products[] = { 487 { PCI_PRODUCT_TRIONES_HPT366, 488 IDE_PCI_CLASS_OVERRIDE, 489 NULL, 490 hpt_chip_map, 491 }, 492 { PCI_PRODUCT_TRIONES_HPT374, 493 IDE_PCI_CLASS_OVERRIDE, 494 NULL, 495 hpt_chip_map 496 }, 497 { 0, 498 0, 499 NULL, 500 NULL 501 } 502 }; 503 504 const struct pciide_product_desc pciide_acard_products[] = { 505 { PCI_PRODUCT_ACARD_ATP850U, 506 IDE_PCI_CLASS_OVERRIDE, 507 "Acard ATP850U Ultra33 IDE Controller", 508 acard_chip_map, 509 }, 510 { PCI_PRODUCT_ACARD_ATP860, 511 IDE_PCI_CLASS_OVERRIDE, 512 "Acard ATP860 Ultra66 IDE Controller", 513 acard_chip_map, 514 }, 515 { PCI_PRODUCT_ACARD_ATP860A, 516 IDE_PCI_CLASS_OVERRIDE, 517 "Acard ATP860-A Ultra66 IDE Controller", 518 acard_chip_map, 519 }, 520 { 0, 521 0, 522 NULL, 523 NULL 524 } 525 }; 526 527 const struct pciide_product_desc pciide_serverworks_products[] = { 528 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE, 529 0, 530 "ServerWorks OSB4 IDE Controller", 531 serverworks_chip_map, 532 }, 533 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE, 534 0, 535 "ServerWorks CSB5 IDE Controller", 536 serverworks_chip_map, 537 }, 538 { 0, 539 0, 540 NULL, 541 } 542 }; 543 544 const struct pciide_product_desc pciide_symphony_products[] = { 545 { PCI_PRODUCT_SYMPHONY_82C105, 546 0, 547 "Symphony Labs 82C105 IDE controller", 548 sl82c105_chip_map, 549 }, 550 { 0, 551 0, 552 NULL, 553 } 554 }; 555 556 const struct pciide_product_desc pciide_winbond_products[] = { 557 { PCI_PRODUCT_WINBOND_W83C553F_1, 558 0, 559 "Winbond W83C553F IDE controller", 560 sl82c105_chip_map, 561 }, 562 { 0, 563 0, 564 NULL, 565 } 566 }; 567 568 struct pciide_vendor_desc { 569 u_int32_t ide_vendor; 570 const struct pciide_product_desc *ide_products; 571 }; 572 573 const struct pciide_vendor_desc pciide_vendors[] = { 574 { PCI_VENDOR_INTEL, pciide_intel_products }, 575 { PCI_VENDOR_CMDTECH, pciide_cmd_products }, 576 { PCI_VENDOR_VIATECH, pciide_via_products }, 577 { PCI_VENDOR_CONTAQ, pciide_cypress_products }, 578 { PCI_VENDOR_SIS, pciide_sis_products }, 
579 { PCI_VENDOR_ALI, pciide_acer_products }, 580 { PCI_VENDOR_PROMISE, pciide_promise_products }, 581 { PCI_VENDOR_AMD, pciide_amd_products }, 582 { PCI_VENDOR_OPTI, pciide_opti_products }, 583 { PCI_VENDOR_TRIONES, pciide_triones_products }, 584 { PCI_VENDOR_ACARD, pciide_acard_products }, 585 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products }, 586 { PCI_VENDOR_SYMPHONY, pciide_symphony_products }, 587 { PCI_VENDOR_WINBOND, pciide_winbond_products }, 588 { 0, NULL } 589 }; 590 591 /* options passed via the 'flags' config keyword */ 592 #define PCIIDE_OPTIONS_DMA 0x01 593 #define PCIIDE_OPTIONS_NODMA 0x02 594 595 int pciide_match __P((struct device *, struct cfdata *, void *)); 596 void pciide_attach __P((struct device *, struct device *, void *)); 597 598 struct cfattach pciide_ca = { 599 sizeof(struct pciide_softc), pciide_match, pciide_attach 600 }; 601 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *)); 602 int pciide_mapregs_compat __P(( struct pci_attach_args *, 603 struct pciide_channel *, int, bus_size_t *, bus_size_t*)); 604 int pciide_mapregs_native __P((struct pci_attach_args *, 605 struct pciide_channel *, bus_size_t *, bus_size_t *, 606 int (*pci_intr) __P((void *)))); 607 void pciide_mapreg_dma __P((struct pciide_softc *, 608 struct pci_attach_args *)); 609 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t)); 610 void pciide_mapchan __P((struct pci_attach_args *, 611 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *, 612 int (*pci_intr) __P((void *)))); 613 int pciide_chan_candisable __P((struct pciide_channel *)); 614 void pciide_map_compat_intr __P(( struct pci_attach_args *, 615 struct pciide_channel *, int, int)); 616 int pciide_compat_intr __P((void *)); 617 int pciide_pci_intr __P((void *)); 618 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t)); 619 620 const struct pciide_product_desc * 621 pciide_lookup_product(id) 622 u_int32_t id; 623 { 624 const struct pciide_product_desc *pp; 625 const struct pciide_vendor_desc *vp; 626 627 for (vp = pciide_vendors; vp->ide_products != NULL; vp++) 628 if (PCI_VENDOR(id) == vp->ide_vendor) 629 break; 630 631 if ((pp = vp->ide_products) == NULL) 632 return NULL; 633 634 for (; pp->chip_map != NULL; pp++) 635 if (PCI_PRODUCT(id) == pp->ide_product) 636 break; 637 638 if (pp->chip_map == NULL) 639 return NULL; 640 return pp; 641 } 642 643 int 644 pciide_match(parent, match, aux) 645 struct device *parent; 646 struct cfdata *match; 647 void *aux; 648 { 649 struct pci_attach_args *pa = aux; 650 const struct pciide_product_desc *pp; 651 652 /* 653 * Check the ID register to see that it's a PCI IDE controller. 654 * If it is, we assume that we can deal with it; it _should_ 655 * work in a standardized way... 656 */ 657 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE && 658 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 659 return (1); 660 } 661 662 /* 663 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE 664 * controllers. Let see if we can deal with it anyway. 
665 */ 666 pp = pciide_lookup_product(pa->pa_id); 667 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) { 668 return (1); 669 } 670 671 return (0); 672 } 673 674 void 675 pciide_attach(parent, self, aux) 676 struct device *parent, *self; 677 void *aux; 678 { 679 struct pci_attach_args *pa = aux; 680 pci_chipset_tag_t pc = pa->pa_pc; 681 pcitag_t tag = pa->pa_tag; 682 struct pciide_softc *sc = (struct pciide_softc *)self; 683 pcireg_t csr; 684 char devinfo[256]; 685 const char *displaydev; 686 687 sc->sc_pp = pciide_lookup_product(pa->pa_id); 688 if (sc->sc_pp == NULL) { 689 sc->sc_pp = &default_product_desc; 690 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo); 691 displaydev = devinfo; 692 } else 693 displaydev = sc->sc_pp->ide_name; 694 695 /* if displaydev == NULL, printf is done in chip-specific map */ 696 if (displaydev) 697 printf(": %s (rev. 0x%02x)\n", displaydev, 698 PCI_REVISION(pa->pa_class)); 699 700 sc->sc_pc = pa->pa_pc; 701 sc->sc_tag = pa->pa_tag; 702 #ifdef WDCDEBUG 703 if (wdcdebug_pciide_mask & DEBUG_PROBE) 704 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL); 705 #endif 706 sc->sc_pp->chip_map(sc, pa); 707 708 if (sc->sc_dma_ok) { 709 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 710 csr |= PCI_COMMAND_MASTER_ENABLE; 711 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 712 } 713 WDCDEBUG_PRINT(("pciide: command/status register=%x\n", 714 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE); 715 } 716 717 /* tell wether the chip is enabled or not */ 718 int 719 pciide_chipen(sc, pa) 720 struct pciide_softc *sc; 721 struct pci_attach_args *pa; 722 { 723 pcireg_t csr; 724 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) { 725 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 726 PCI_COMMAND_STATUS_REG); 727 printf("%s: device disabled (at %s)\n", 728 sc->sc_wdcdev.sc_dev.dv_xname, 729 (csr & PCI_COMMAND_IO_ENABLE) == 0 ? 
730 "device" : "bridge"); 731 return 0; 732 } 733 return 1; 734 } 735 736 int 737 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep) 738 struct pci_attach_args *pa; 739 struct pciide_channel *cp; 740 int compatchan; 741 bus_size_t *cmdsizep, *ctlsizep; 742 { 743 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 744 struct channel_softc *wdc_cp = &cp->wdc_channel; 745 746 cp->compat = 1; 747 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 748 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 749 750 wdc_cp->cmd_iot = pa->pa_iot; 751 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 752 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) { 753 printf("%s: couldn't map %s channel cmd regs\n", 754 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 755 return (0); 756 } 757 758 wdc_cp->ctl_iot = pa->pa_iot; 759 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 760 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) { 761 printf("%s: couldn't map %s channel ctl regs\n", 762 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 763 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, 764 PCIIDE_COMPAT_CMD_SIZE); 765 return (0); 766 } 767 768 return (1); 769 } 770 771 int 772 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr) 773 struct pci_attach_args * pa; 774 struct pciide_channel *cp; 775 bus_size_t *cmdsizep, *ctlsizep; 776 int (*pci_intr) __P((void *)); 777 { 778 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 779 struct channel_softc *wdc_cp = &cp->wdc_channel; 780 const char *intrstr; 781 pci_intr_handle_t intrhandle; 782 783 cp->compat = 0; 784 785 if (sc->sc_pci_ih == NULL) { 786 if (pci_intr_map(pa, &intrhandle) != 0) { 787 printf("%s: couldn't map native-PCI interrupt\n", 788 sc->sc_wdcdev.sc_dev.dv_xname); 789 return 0; 790 } 791 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 792 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 793 intrhandle, IPL_BIO, pci_intr, sc); 794 if (sc->sc_pci_ih != NULL) { 795 printf("%s: using %s for native-PCI interrupt\n", 796 sc->sc_wdcdev.sc_dev.dv_xname, 797 intrstr ? intrstr : "unknown interrupt"); 798 } else { 799 printf("%s: couldn't establish native-PCI interrupt", 800 sc->sc_wdcdev.sc_dev.dv_xname); 801 if (intrstr != NULL) 802 printf(" at %s", intrstr); 803 printf("\n"); 804 return 0; 805 } 806 } 807 cp->ih = sc->sc_pci_ih; 808 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel), 809 PCI_MAPREG_TYPE_IO, 0, 810 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) { 811 printf("%s: couldn't map %s channel cmd regs\n", 812 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 813 return 0; 814 } 815 816 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel), 817 PCI_MAPREG_TYPE_IO, 0, 818 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) { 819 printf("%s: couldn't map %s channel ctl regs\n", 820 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 821 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 822 return 0; 823 } 824 /* 825 * In native mode, 4 bytes of I/O space are mapped for the control 826 * register, the control register is at offset 2. Pass the generic 827 * code a handle for only one byte at the rigth offset. 
828 */ 829 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1, 830 &wdc_cp->ctl_ioh) != 0) { 831 printf("%s: unable to subregion %s channel ctl regs\n", 832 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 833 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 834 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep); 835 return 0; 836 } 837 return (1); 838 } 839 840 void 841 pciide_mapreg_dma(sc, pa) 842 struct pciide_softc *sc; 843 struct pci_attach_args *pa; 844 { 845 pcireg_t maptype; 846 bus_addr_t addr; 847 848 /* 849 * Map DMA registers 850 * 851 * Note that sc_dma_ok is the right variable to test to see if 852 * DMA can be done. If the interface doesn't support DMA, 853 * sc_dma_ok will never be non-zero. If the DMA regs couldn't 854 * be mapped, it'll be zero. I.e., sc_dma_ok will only be 855 * non-zero if the interface supports DMA and the registers 856 * could be mapped. 857 * 858 * XXX Note that despite the fact that the Bus Master IDE specs 859 * XXX say that "The bus master IDE function uses 16 bytes of IO 860 * XXX space," some controllers (at least the United 861 * XXX Microelectronics UM8886BF) place it in memory space. 862 */ 863 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 864 PCIIDE_REG_BUS_MASTER_DMA); 865 866 switch (maptype) { 867 case PCI_MAPREG_TYPE_IO: 868 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 869 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 870 &addr, NULL, NULL) == 0); 871 if (sc->sc_dma_ok == 0) { 872 printf(", but unused (couldn't query registers)"); 873 break; 874 } 875 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 876 && addr >= 0x10000) { 877 sc->sc_dma_ok = 0; 878 printf(", but unused (registers at unsafe address " 879 "%#lx)", (unsigned long)addr); 880 break; 881 } 882 /* FALLTHROUGH */ 883 884 case PCI_MAPREG_MEM_TYPE_32BIT: 885 sc->sc_dma_ok = (pci_mapreg_map(pa, 886 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 887 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0); 888 sc->sc_dmat = pa->pa_dmat; 889 if (sc->sc_dma_ok == 0) { 890 printf(", but unused (couldn't map registers)"); 891 } else { 892 sc->sc_wdcdev.dma_arg = sc; 893 sc->sc_wdcdev.dma_init = pciide_dma_init; 894 sc->sc_wdcdev.dma_start = pciide_dma_start; 895 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 896 } 897 898 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 899 PCIIDE_OPTIONS_NODMA) { 900 printf(", but unused (forced off by config file)"); 901 sc->sc_dma_ok = 0; 902 } 903 break; 904 905 default: 906 sc->sc_dma_ok = 0; 907 printf(", but unsupported register maptype (0x%x)", maptype); 908 } 909 } 910 911 int 912 pciide_compat_intr(arg) 913 void *arg; 914 { 915 struct pciide_channel *cp = arg; 916 917 #ifdef DIAGNOSTIC 918 /* should only be called for a compat channel */ 919 if (cp->compat == 0) 920 panic("pciide compat intr called for non-compat chan %p\n", cp); 921 #endif 922 return (wdcintr(&cp->wdc_channel)); 923 } 924 925 int 926 pciide_pci_intr(arg) 927 void *arg; 928 { 929 struct pciide_softc *sc = arg; 930 struct pciide_channel *cp; 931 struct channel_softc *wdc_cp; 932 int i, rv, crv; 933 934 rv = 0; 935 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 936 cp = &sc->pciide_channels[i]; 937 wdc_cp = &cp->wdc_channel; 938 939 /* If a compat channel skip. 
*/ 940 if (cp->compat) 941 continue; 942 /* if this channel not waiting for intr, skip */ 943 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) 944 continue; 945 946 crv = wdcintr(wdc_cp); 947 if (crv == 0) 948 ; /* leave rv alone */ 949 else if (crv == 1) 950 rv = 1; /* claim the intr */ 951 else if (rv == 0) /* crv should be -1 in this case */ 952 rv = crv; /* if we've done no better, take it */ 953 } 954 return (rv); 955 } 956 957 void 958 pciide_channel_dma_setup(cp) 959 struct pciide_channel *cp; 960 { 961 int drive; 962 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 963 struct ata_drive_datas *drvp; 964 965 for (drive = 0; drive < 2; drive++) { 966 drvp = &cp->wdc_channel.ch_drive[drive]; 967 /* If no drive, skip */ 968 if ((drvp->drive_flags & DRIVE) == 0) 969 continue; 970 /* setup DMA if needed */ 971 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 972 (drvp->drive_flags & DRIVE_UDMA) == 0) || 973 sc->sc_dma_ok == 0) { 974 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 975 continue; 976 } 977 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive) 978 != 0) { 979 /* Abort DMA setup */ 980 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 981 continue; 982 } 983 } 984 } 985 986 int 987 pciide_dma_table_setup(sc, channel, drive) 988 struct pciide_softc *sc; 989 int channel, drive; 990 { 991 bus_dma_segment_t seg; 992 int error, rseg; 993 const bus_size_t dma_table_size = 994 sizeof(struct idedma_table) * NIDEDMA_TABLES; 995 struct pciide_dma_maps *dma_maps = 996 &sc->pciide_channels[channel].dma_maps[drive]; 997 998 /* If table was already allocated, just return */ 999 if (dma_maps->dma_table) 1000 return 0; 1001 1002 /* Allocate memory for the DMA tables and map it */ 1003 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size, 1004 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg, 1005 BUS_DMA_NOWAIT)) != 0) { 1006 printf("%s:%d: unable to allocate table DMA for " 1007 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1008 channel, drive, error); 1009 return error; 1010 } 1011 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 1012 dma_table_size, 1013 (caddr_t *)&dma_maps->dma_table, 1014 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 1015 printf("%s:%d: unable to map table DMA for" 1016 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1017 channel, drive, error); 1018 return error; 1019 } 1020 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, " 1021 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size, 1022 (unsigned long)seg.ds_addr), DEBUG_PROBE); 1023 1024 /* Create and load table DMA map for this disk */ 1025 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size, 1026 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 1027 &dma_maps->dmamap_table)) != 0) { 1028 printf("%s:%d: unable to create table DMA map for " 1029 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1030 channel, drive, error); 1031 return error; 1032 } 1033 if ((error = bus_dmamap_load(sc->sc_dmat, 1034 dma_maps->dmamap_table, 1035 dma_maps->dma_table, 1036 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 1037 printf("%s:%d: unable to load table DMA map for " 1038 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1039 channel, drive, error); 1040 return error; 1041 } 1042 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 1043 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr), 1044 DEBUG_PROBE); 1045 /* Create a xfer DMA map for this drive */ 1046 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 
1047 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN, 1048 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1049 &dma_maps->dmamap_xfer)) != 0) { 1050 printf("%s:%d: unable to create xfer DMA map for " 1051 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1052 channel, drive, error); 1053 return error; 1054 } 1055 return 0; 1056 } 1057 1058 int 1059 pciide_dma_init(v, channel, drive, databuf, datalen, flags) 1060 void *v; 1061 int channel, drive; 1062 void *databuf; 1063 size_t datalen; 1064 int flags; 1065 { 1066 struct pciide_softc *sc = v; 1067 int error, seg; 1068 struct pciide_dma_maps *dma_maps = 1069 &sc->pciide_channels[channel].dma_maps[drive]; 1070 1071 error = bus_dmamap_load(sc->sc_dmat, 1072 dma_maps->dmamap_xfer, 1073 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 1074 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE)); 1075 if (error) { 1076 printf("%s:%d: unable to load xfer DMA map for" 1077 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1078 channel, drive, error); 1079 return error; 1080 } 1081 1082 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1083 dma_maps->dmamap_xfer->dm_mapsize, 1084 (flags & WDC_DMA_READ) ? 1085 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1086 1087 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 1088 #ifdef DIAGNOSTIC 1089 /* A segment must not cross a 64k boundary */ 1090 { 1091 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 1092 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 1093 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 1094 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 1095 printf("pciide_dma: segment %d physical addr 0x%lx" 1096 " len 0x%lx not properly aligned\n", 1097 seg, phys, len); 1098 panic("pciide_dma: buf align"); 1099 } 1100 } 1101 #endif 1102 dma_maps->dma_table[seg].base_addr = 1103 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 1104 dma_maps->dma_table[seg].byte_count = 1105 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 1106 IDEDMA_BYTE_COUNT_MASK); 1107 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 1108 seg, le32toh(dma_maps->dma_table[seg].byte_count), 1109 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 1110 1111 } 1112 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 1113 htole32(IDEDMA_BYTE_COUNT_EOT); 1114 1115 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 1116 dma_maps->dmamap_table->dm_mapsize, 1117 BUS_DMASYNC_PREWRITE); 1118 1119 /* Maps are ready. Start DMA function */ 1120 #ifdef DIAGNOSTIC 1121 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 1122 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 1123 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr); 1124 panic("pciide_dma_init: table align"); 1125 } 1126 #endif 1127 1128 /* Clear status bits */ 1129 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1130 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, 1131 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1132 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel)); 1133 /* Write table addr */ 1134 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 1135 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel, 1136 dma_maps->dmamap_table->dm_segs[0].ds_addr); 1137 /* set read/write */ 1138 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1139 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1140 (flags & WDC_DMA_READ) ? 
IDEDMA_CMD_WRITE: 0); 1141 /* remember flags */ 1142 dma_maps->dma_flags = flags; 1143 return 0; 1144 } 1145 1146 void 1147 pciide_dma_start(v, channel, drive) 1148 void *v; 1149 int channel, drive; 1150 { 1151 struct pciide_softc *sc = v; 1152 1153 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS); 1154 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1155 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1156 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1157 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START); 1158 } 1159 1160 int 1161 pciide_dma_finish(v, channel, drive, force) 1162 void *v; 1163 int channel, drive; 1164 int force; 1165 { 1166 struct pciide_softc *sc = v; 1167 u_int8_t status; 1168 int error = 0; 1169 struct pciide_dma_maps *dma_maps = 1170 &sc->pciide_channels[channel].dma_maps[drive]; 1171 1172 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1173 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel); 1174 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 1175 DEBUG_XFERS); 1176 1177 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) 1178 return WDC_DMAST_NOIRQ; 1179 1180 /* stop DMA channel */ 1181 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1182 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1183 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1184 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START); 1185 1186 /* Unload the map of the data buffer */ 1187 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1188 dma_maps->dmamap_xfer->dm_mapsize, 1189 (dma_maps->dma_flags & WDC_DMA_READ) ? 1190 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1191 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 1192 1193 if ((status & IDEDMA_CTL_ERR) != 0) { 1194 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 1195 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 1196 error |= WDC_DMAST_ERR; 1197 } 1198 1199 if ((status & IDEDMA_CTL_INTR) == 0) { 1200 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 1201 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 1202 drive, status); 1203 error |= WDC_DMAST_NOIRQ; 1204 } 1205 1206 if ((status & IDEDMA_CTL_ACT) != 0) { 1207 /* data underrun, may be a valid condition for ATAPI */ 1208 error |= WDC_DMAST_UNDER; 1209 } 1210 return error; 1211 } 1212 1213 void 1214 pciide_irqack(chp) 1215 struct channel_softc *chp; 1216 { 1217 struct pciide_channel *cp = (struct pciide_channel*)chp; 1218 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1219 1220 /* clear status bits in IDE DMA registers */ 1221 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1222 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel, 1223 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1224 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel)); 1225 } 1226 1227 /* some common code used by several chip_map */ 1228 int 1229 pciide_chansetup(sc, channel, interface) 1230 struct pciide_softc *sc; 1231 int channel; 1232 pcireg_t interface; 1233 { 1234 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1235 sc->wdc_chanarray[channel] = &cp->wdc_channel; 1236 cp->name = PCIIDE_CHANNEL_NAME(channel); 1237 cp->wdc_channel.channel = channel; 1238 cp->wdc_channel.wdc = &sc->sc_wdcdev; 1239 cp->wdc_channel.ch_queue = 1240 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 1241 if (cp->wdc_channel.ch_queue == NULL) { 1242 printf("%s %s channel: " 1243 "can't allocate memory for command queue", 1244 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1245 return 0; 1246 } 1247 printf("%s: %s channel %s to %s 
mode\n", 1248 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1249 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 1250 "configured" : "wired", 1251 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 1252 "native-PCI" : "compatibility"); 1253 return 1; 1254 } 1255 1256 /* some common code used by several chip channel_map */ 1257 void 1258 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr) 1259 struct pci_attach_args *pa; 1260 struct pciide_channel *cp; 1261 pcireg_t interface; 1262 bus_size_t *cmdsizep, *ctlsizep; 1263 int (*pci_intr) __P((void *)); 1264 { 1265 struct channel_softc *wdc_cp = &cp->wdc_channel; 1266 1267 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 1268 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 1269 pci_intr); 1270 else 1271 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1272 wdc_cp->channel, cmdsizep, ctlsizep); 1273 1274 if (cp->hw_ok == 0) 1275 return; 1276 wdc_cp->data32iot = wdc_cp->cmd_iot; 1277 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 1278 wdcattach(wdc_cp); 1279 } 1280 1281 /* 1282 * Generic code to call to know if a channel can be disabled. Return 1 1283 * if channel can be disabled, 0 if not 1284 */ 1285 int 1286 pciide_chan_candisable(cp) 1287 struct pciide_channel *cp; 1288 { 1289 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1290 struct channel_softc *wdc_cp = &cp->wdc_channel; 1291 1292 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 1293 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 1294 printf("%s: disabling %s channel (no drives)\n", 1295 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1296 cp->hw_ok = 0; 1297 return 1; 1298 } 1299 return 0; 1300 } 1301 1302 /* 1303 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 1304 * Set hw_ok=0 on failure 1305 */ 1306 void 1307 pciide_map_compat_intr(pa, cp, compatchan, interface) 1308 struct pci_attach_args *pa; 1309 struct pciide_channel *cp; 1310 int compatchan, interface; 1311 { 1312 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1313 struct channel_softc *wdc_cp = &cp->wdc_channel; 1314 1315 if (cp->hw_ok == 0) 1316 return; 1317 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1318 return; 1319 1320 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 1321 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 1322 pa, compatchan, pciide_compat_intr, cp); 1323 if (cp->ih == NULL) { 1324 #endif 1325 printf("%s: no compatibility interrupt for use by %s " 1326 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1327 cp->hw_ok = 0; 1328 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 1329 } 1330 #endif 1331 } 1332 1333 void 1334 pciide_print_modes(cp) 1335 struct pciide_channel *cp; 1336 { 1337 wdc_print_modes(&cp->wdc_channel); 1338 } 1339 1340 void 1341 default_chip_map(sc, pa) 1342 struct pciide_softc *sc; 1343 struct pci_attach_args *pa; 1344 { 1345 struct pciide_channel *cp; 1346 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1347 pcireg_t csr; 1348 int channel, drive; 1349 struct ata_drive_datas *drvp; 1350 u_int8_t idedma_ctl; 1351 bus_size_t cmdsize, ctlsize; 1352 char *failreason; 1353 1354 if (pciide_chipen(sc, pa) == 0) 1355 return; 1356 1357 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 1358 printf("%s: bus-master DMA support present", 1359 sc->sc_wdcdev.sc_dev.dv_xname); 1360 if (sc->sc_pp == &default_product_desc && 1361 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 1362 PCIIDE_OPTIONS_DMA) == 0) { 1363 printf(", but unused (no driver support)"); 1364 sc->sc_dma_ok = 
0; 1365 } else { 1366 pciide_mapreg_dma(sc, pa); 1367 if (sc->sc_dma_ok != 0) 1368 printf(", used without full driver " 1369 "support"); 1370 } 1371 } else { 1372 printf("%s: hardware does not support DMA", 1373 sc->sc_wdcdev.sc_dev.dv_xname); 1374 sc->sc_dma_ok = 0; 1375 } 1376 printf("\n"); 1377 if (sc->sc_dma_ok) { 1378 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1379 sc->sc_wdcdev.irqack = pciide_irqack; 1380 } 1381 sc->sc_wdcdev.PIO_cap = 0; 1382 sc->sc_wdcdev.DMA_cap = 0; 1383 1384 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1385 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1386 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 1387 1388 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1389 cp = &sc->pciide_channels[channel]; 1390 if (pciide_chansetup(sc, channel, interface) == 0) 1391 continue; 1392 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 1393 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 1394 &ctlsize, pciide_pci_intr); 1395 } else { 1396 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1397 channel, &cmdsize, &ctlsize); 1398 } 1399 if (cp->hw_ok == 0) 1400 continue; 1401 /* 1402 * Check to see if something appears to be there. 1403 */ 1404 failreason = NULL; 1405 if (!wdcprobe(&cp->wdc_channel)) { 1406 failreason = "not responding; disabled or no drives?"; 1407 goto next; 1408 } 1409 /* 1410 * Now, make sure it's actually attributable to this PCI IDE 1411 * channel by trying to access the channel again while the 1412 * PCI IDE controller's I/O space is disabled. (If the 1413 * channel no longer appears to be there, it belongs to 1414 * this controller.) YUCK! 1415 */ 1416 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 1417 PCI_COMMAND_STATUS_REG); 1418 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1419 csr & ~PCI_COMMAND_IO_ENABLE); 1420 if (wdcprobe(&cp->wdc_channel)) 1421 failreason = "other hardware responding at addresses"; 1422 pci_conf_write(sc->sc_pc, sc->sc_tag, 1423 PCI_COMMAND_STATUS_REG, csr); 1424 next: 1425 if (failreason) { 1426 printf("%s: %s channel ignored (%s)\n", 1427 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1428 failreason); 1429 cp->hw_ok = 0; 1430 bus_space_unmap(cp->wdc_channel.cmd_iot, 1431 cp->wdc_channel.cmd_ioh, cmdsize); 1432 if (interface & PCIIDE_INTERFACE_PCI(channel)) 1433 bus_space_unmap(cp->wdc_channel.ctl_iot, 1434 cp->ctl_baseioh, ctlsize); 1435 else 1436 bus_space_unmap(cp->wdc_channel.ctl_iot, 1437 cp->wdc_channel.ctl_ioh, ctlsize); 1438 } else { 1439 pciide_map_compat_intr(pa, cp, channel, interface); 1440 } 1441 if (cp->hw_ok) { 1442 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 1443 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 1444 wdcattach(&cp->wdc_channel); 1445 } 1446 } 1447 1448 if (sc->sc_dma_ok == 0) 1449 return; 1450 1451 /* Allocate DMA maps */ 1452 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1453 idedma_ctl = 0; 1454 cp = &sc->pciide_channels[channel]; 1455 for (drive = 0; drive < 2; drive++) { 1456 drvp = &cp->wdc_channel.ch_drive[drive]; 1457 /* If no drive, skip */ 1458 if ((drvp->drive_flags & DRIVE) == 0) 1459 continue; 1460 if ((drvp->drive_flags & DRIVE_DMA) == 0) 1461 continue; 1462 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 1463 /* Abort DMA setup */ 1464 printf("%s:%d:%d: can't allocate DMA maps, " 1465 "using PIO transfers\n", 1466 sc->sc_wdcdev.sc_dev.dv_xname, 1467 channel, drive); 1468 drvp->drive_flags &= ~DRIVE_DMA; 1469 } 1470 printf("%s:%d:%d: using DMA data transfers\n", 1471 sc->sc_wdcdev.sc_dev.dv_xname, 1472 
channel, drive); 1473 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1474 } 1475 if (idedma_ctl != 0) { 1476 /* Add software bits in status register */ 1477 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1478 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel), 1479 idedma_ctl); 1480 } 1481 } 1482 } 1483 1484 void 1485 piix_chip_map(sc, pa) 1486 struct pciide_softc *sc; 1487 struct pci_attach_args *pa; 1488 { 1489 struct pciide_channel *cp; 1490 int channel; 1491 u_int32_t idetim; 1492 bus_size_t cmdsize, ctlsize; 1493 1494 if (pciide_chipen(sc, pa) == 0) 1495 return; 1496 1497 printf("%s: bus-master DMA support present", 1498 sc->sc_wdcdev.sc_dev.dv_xname); 1499 pciide_mapreg_dma(sc, pa); 1500 printf("\n"); 1501 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1502 WDC_CAPABILITY_MODE; 1503 if (sc->sc_dma_ok) { 1504 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1505 sc->sc_wdcdev.irqack = pciide_irqack; 1506 switch(sc->sc_pp->ide_product) { 1507 case PCI_PRODUCT_INTEL_82371AB_IDE: 1508 case PCI_PRODUCT_INTEL_82440MX_IDE: 1509 case PCI_PRODUCT_INTEL_82801AA_IDE: 1510 case PCI_PRODUCT_INTEL_82801AB_IDE: 1511 case PCI_PRODUCT_INTEL_82801BA_IDE: 1512 case PCI_PRODUCT_INTEL_82801BAM_IDE: 1513 case PCI_PRODUCT_INTEL_82801CA_IDE_1: 1514 case PCI_PRODUCT_INTEL_82801CA_IDE_2: 1515 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 1516 } 1517 } 1518 sc->sc_wdcdev.PIO_cap = 4; 1519 sc->sc_wdcdev.DMA_cap = 2; 1520 switch(sc->sc_pp->ide_product) { 1521 case PCI_PRODUCT_INTEL_82801AA_IDE: 1522 sc->sc_wdcdev.UDMA_cap = 4; 1523 break; 1524 case PCI_PRODUCT_INTEL_82801BA_IDE: 1525 case PCI_PRODUCT_INTEL_82801BAM_IDE: 1526 case PCI_PRODUCT_INTEL_82801CA_IDE_1: 1527 case PCI_PRODUCT_INTEL_82801CA_IDE_2: 1528 sc->sc_wdcdev.UDMA_cap = 5; 1529 break; 1530 default: 1531 sc->sc_wdcdev.UDMA_cap = 2; 1532 } 1533 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE) 1534 sc->sc_wdcdev.set_modes = piix_setup_channel; 1535 else 1536 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 1537 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1538 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1539 1540 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x", 1541 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1542 DEBUG_PROBE); 1543 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1544 WDCDEBUG_PRINT((", sidetim=0x%x", 1545 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1546 DEBUG_PROBE); 1547 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1548 WDCDEBUG_PRINT((", udamreg 0x%x", 1549 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1550 DEBUG_PROBE); 1551 } 1552 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1553 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1554 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1555 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1556 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 || 1557 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) { 1558 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1559 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1560 DEBUG_PROBE); 1561 } 1562 1563 } 1564 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1565 1566 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1567 cp = &sc->pciide_channels[channel]; 1568 /* PIIX is compat-only */ 1569 if (pciide_chansetup(sc, channel, 0) == 0) 1570 continue; 1571 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1572 if ((PIIX_IDETIM_READ(idetim, channel) & 1573 PIIX_IDETIM_IDE) == 0) { 1574 printf("%s: 
%s channel ignored (disabled)\n", 1575 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1576 continue; 1577 } 1578 /* PIIX are compat-only pciide devices */ 1579 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr); 1580 if (cp->hw_ok == 0) 1581 continue; 1582 if (pciide_chan_candisable(cp)) { 1583 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 1584 channel); 1585 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 1586 idetim); 1587 } 1588 pciide_map_compat_intr(pa, cp, channel, 0); 1589 if (cp->hw_ok == 0) 1590 continue; 1591 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 1592 } 1593 1594 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 1595 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1596 DEBUG_PROBE); 1597 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1598 WDCDEBUG_PRINT((", sidetim=0x%x", 1599 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1600 DEBUG_PROBE); 1601 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1602 WDCDEBUG_PRINT((", udamreg 0x%x", 1603 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1604 DEBUG_PROBE); 1605 } 1606 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1607 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1608 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1609 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1610 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 || 1611 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) { 1612 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1613 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1614 DEBUG_PROBE); 1615 } 1616 } 1617 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1618 } 1619 1620 void 1621 piix_setup_channel(chp) 1622 struct channel_softc *chp; 1623 { 1624 u_int8_t mode[2], drive; 1625 u_int32_t oidetim, idetim, idedma_ctl; 1626 struct pciide_channel *cp = (struct pciide_channel*)chp; 1627 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1628 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 1629 1630 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1631 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 1632 idedma_ctl = 0; 1633 1634 /* set up new idetim: Enable IDE registers decode */ 1635 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 1636 chp->channel); 1637 1638 /* setup DMA */ 1639 pciide_channel_dma_setup(cp); 1640 1641 /* 1642 * Here we have to mess up with drives mode: PIIX can't have 1643 * different timings for master and slave drives. 1644 * We need to find the best combination. 
1645 */ 1646 1647 /* If both drives supports DMA, take the lower mode */ 1648 if ((drvp[0].drive_flags & DRIVE_DMA) && 1649 (drvp[1].drive_flags & DRIVE_DMA)) { 1650 mode[0] = mode[1] = 1651 min(drvp[0].DMA_mode, drvp[1].DMA_mode); 1652 drvp[0].DMA_mode = mode[0]; 1653 drvp[1].DMA_mode = mode[1]; 1654 goto ok; 1655 } 1656 /* 1657 * If only one drive supports DMA, use its mode, and 1658 * put the other one in PIO mode 0 if mode not compatible 1659 */ 1660 if (drvp[0].drive_flags & DRIVE_DMA) { 1661 mode[0] = drvp[0].DMA_mode; 1662 mode[1] = drvp[1].PIO_mode; 1663 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] || 1664 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]]) 1665 mode[1] = drvp[1].PIO_mode = 0; 1666 goto ok; 1667 } 1668 if (drvp[1].drive_flags & DRIVE_DMA) { 1669 mode[1] = drvp[1].DMA_mode; 1670 mode[0] = drvp[0].PIO_mode; 1671 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] || 1672 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]]) 1673 mode[0] = drvp[0].PIO_mode = 0; 1674 goto ok; 1675 } 1676 /* 1677 * If both drives are not DMA, takes the lower mode, unless 1678 * one of them is PIO mode < 2 1679 */ 1680 if (drvp[0].PIO_mode < 2) { 1681 mode[0] = drvp[0].PIO_mode = 0; 1682 mode[1] = drvp[1].PIO_mode; 1683 } else if (drvp[1].PIO_mode < 2) { 1684 mode[1] = drvp[1].PIO_mode = 0; 1685 mode[0] = drvp[0].PIO_mode; 1686 } else { 1687 mode[0] = mode[1] = 1688 min(drvp[1].PIO_mode, drvp[0].PIO_mode); 1689 drvp[0].PIO_mode = mode[0]; 1690 drvp[1].PIO_mode = mode[1]; 1691 } 1692 ok: /* The modes are setup */ 1693 for (drive = 0; drive < 2; drive++) { 1694 if (drvp[drive].drive_flags & DRIVE_DMA) { 1695 idetim |= piix_setup_idetim_timings( 1696 mode[drive], 1, chp->channel); 1697 goto end; 1698 } 1699 } 1700 /* If we are there, none of the drives are DMA */ 1701 if (mode[0] >= 2) 1702 idetim |= piix_setup_idetim_timings( 1703 mode[0], 0, chp->channel); 1704 else 1705 idetim |= piix_setup_idetim_timings( 1706 mode[1], 0, chp->channel); 1707 end: /* 1708 * timing mode is now set up in the controller. 
Enable 1709 * it per-drive 1710 */ 1711 for (drive = 0; drive < 2; drive++) { 1712 /* If no drive, skip */ 1713 if ((drvp[drive].drive_flags & DRIVE) == 0) 1714 continue; 1715 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 1716 if (drvp[drive].drive_flags & DRIVE_DMA) 1717 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1718 } 1719 if (idedma_ctl != 0) { 1720 /* Add software bits in status register */ 1721 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1722 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 1723 idedma_ctl); 1724 } 1725 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 1726 pciide_print_modes(cp); 1727 } 1728 1729 void 1730 piix3_4_setup_channel(chp) 1731 struct channel_softc *chp; 1732 { 1733 struct ata_drive_datas *drvp; 1734 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 1735 struct pciide_channel *cp = (struct pciide_channel*)chp; 1736 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1737 int drive; 1738 int channel = chp->channel; 1739 1740 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1741 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 1742 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 1743 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 1744 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 1745 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 1746 PIIX_SIDETIM_RTC_MASK(channel)); 1747 1748 idedma_ctl = 0; 1749 /* If channel disabled, no need to go further */ 1750 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 1751 return; 1752 /* set up new idetim: Enable IDE registers decode */ 1753 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 1754 1755 /* setup DMA if needed */ 1756 pciide_channel_dma_setup(cp); 1757 1758 for (drive = 0; drive < 2; drive++) { 1759 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 1760 PIIX_UDMATIM_SET(0x3, channel, drive)); 1761 drvp = &chp->ch_drive[drive]; 1762 /* If no drive, skip */ 1763 if ((drvp->drive_flags & DRIVE) == 0) 1764 continue; 1765 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1766 (drvp->drive_flags & DRIVE_UDMA) == 0)) 1767 goto pio; 1768 1769 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1770 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1771 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1772 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1773 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 || 1774 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) { 1775 ideconf |= PIIX_CONFIG_PINGPONG; 1776 } 1777 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1778 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1779 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 || 1780 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) { 1781 /* setup Ultra/100 */ 1782 if (drvp->UDMA_mode > 2 && 1783 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 1784 drvp->UDMA_mode = 2; 1785 if (drvp->UDMA_mode > 4) { 1786 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 1787 } else { 1788 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 1789 if (drvp->UDMA_mode > 2) { 1790 ideconf |= PIIX_CONFIG_UDMA66(channel, 1791 drive); 1792 } else { 1793 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 1794 drive); 1795 } 1796 } 1797 } 1798 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) { 1799 /* setup Ultra/66 */ 1800 if (drvp->UDMA_mode > 2 && 1801 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 1802 drvp->UDMA_mode = 
2;
1803 if (drvp->UDMA_mode > 2)
1804 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1805 else
1806 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1807 }
1808 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1809 (drvp->drive_flags & DRIVE_UDMA)) {
1810 /* use Ultra/DMA */
1811 drvp->drive_flags &= ~DRIVE_DMA;
1812 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1813 udmareg |= PIIX_UDMATIM_SET(
1814 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1815 } else {
1816 /* use Multiword DMA */
1817 drvp->drive_flags &= ~DRIVE_UDMA;
1818 if (drive == 0) {
1819 idetim |= piix_setup_idetim_timings(
1820 drvp->DMA_mode, 1, channel);
1821 } else {
1822 sidetim |= piix_setup_sidetim_timings(
1823 drvp->DMA_mode, 1, channel);
1824 idetim = PIIX_IDETIM_SET(idetim,
1825 PIIX_IDETIM_SITRE, channel);
1826 }
1827 }
1828 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1829
1830 pio: /* use PIO mode */
1831 idetim |= piix_setup_idetim_drvs(drvp);
1832 if (drive == 0) {
1833 idetim |= piix_setup_idetim_timings(
1834 drvp->PIO_mode, 0, channel);
1835 } else {
1836 sidetim |= piix_setup_sidetim_timings(
1837 drvp->PIO_mode, 0, channel);
1838 idetim = PIIX_IDETIM_SET(idetim,
1839 PIIX_IDETIM_SITRE, channel);
1840 }
1841 }
1842 if (idedma_ctl != 0) {
1843 /* Add software bits in status register */
1844 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1845 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1846 idedma_ctl);
1847 }
1848 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1849 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1850 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1851 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1852 pciide_print_modes(cp);
1853 }
1854
1855
1856 /* setup ISP and RTC fields, based on mode */
1857 static u_int32_t
1858 piix_setup_idetim_timings(mode, dma, channel)
1859 u_int8_t mode;
1860 u_int8_t dma;
1861 u_int8_t channel;
1862 {
1863
1864 if (dma)
1865 return PIIX_IDETIM_SET(0,
1866 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1867 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1868 channel);
1869 else
1870 return PIIX_IDETIM_SET(0,
1871 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1872 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1873 channel);
1874 }
1875
1876 /* setup DTE, PPE, IE and TIME fields based on PIO mode */
1877 static u_int32_t
1878 piix_setup_idetim_drvs(drvp)
1879 struct ata_drive_datas *drvp;
1880 {
1881 u_int32_t ret = 0;
1882 struct channel_softc *chp = drvp->chnl_softc;
1883 u_int8_t channel = chp->channel;
1884 u_int8_t drive = drvp->drive;
1885
1886 /*
1887 * If the drive is using UDMA, the timing setup is independent,
1888 * so just check DMA and PIO here.
1889 */
1890 if (drvp->drive_flags & DRIVE_DMA) {
1891 /* if DMA mode is 0, use compatible timings */
1892 if ((drvp->drive_flags & DRIVE_DMA) &&
1893 drvp->DMA_mode == 0) {
1894 drvp->PIO_mode = 0;
1895 return ret;
1896 }
1897 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1898 /*
1899 * If PIO and DMA timings are the same, use fast timings for PIO
1900 * too, else use compat timings.
1901 */
1902 if ((piix_isp_pio[drvp->PIO_mode] !=
1903 piix_isp_dma[drvp->DMA_mode]) ||
1904 (piix_rtc_pio[drvp->PIO_mode] !=
1905 piix_rtc_dma[drvp->DMA_mode]))
1906 drvp->PIO_mode = 0;
1907 /* if PIO mode <= 2, use compat timings for PIO */
1908 if (drvp->PIO_mode <= 2) {
1909 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1910 channel);
1911 return ret;
1912 }
1913 }
1914
1915 /*
1916 * Now set up PIO modes. If mode < 2, use compat timings.
1917 * Else enable fast timings.
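 * (e.g. a PIO 4 drive gets the TIME, IE and PPE bits set below, while a
 * PIO 2 drive gets only TIME.)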
Enable IORDY and prefetch/post 1918 * if PIO mode >= 3. 1919 */ 1920 1921 if (drvp->PIO_mode < 2) 1922 return ret; 1923 1924 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 1925 if (drvp->PIO_mode >= 3) { 1926 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 1927 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 1928 } 1929 return ret; 1930 } 1931 1932 /* setup values in SIDETIM registers, based on mode */ 1933 static u_int32_t 1934 piix_setup_sidetim_timings(mode, dma, channel) 1935 u_int8_t mode; 1936 u_int8_t dma; 1937 u_int8_t channel; 1938 { 1939 if (dma) 1940 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 1941 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel); 1942 else 1943 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 1944 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel); 1945 } 1946 1947 void 1948 amd7x6_chip_map(sc, pa) 1949 struct pciide_softc *sc; 1950 struct pci_attach_args *pa; 1951 { 1952 struct pciide_channel *cp; 1953 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1954 int channel; 1955 pcireg_t chanenable; 1956 bus_size_t cmdsize, ctlsize; 1957 1958 if (pciide_chipen(sc, pa) == 0) 1959 return; 1960 printf("%s: bus-master DMA support present", 1961 sc->sc_wdcdev.sc_dev.dv_xname); 1962 pciide_mapreg_dma(sc, pa); 1963 printf("\n"); 1964 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1965 WDC_CAPABILITY_MODE; 1966 if (sc->sc_dma_ok) { 1967 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 1968 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 1969 sc->sc_wdcdev.irqack = pciide_irqack; 1970 } 1971 sc->sc_wdcdev.PIO_cap = 4; 1972 sc->sc_wdcdev.DMA_cap = 2; 1973 1974 switch (sc->sc_pp->ide_product) { 1975 case PCI_PRODUCT_AMD_PBC766_IDE: 1976 case PCI_PRODUCT_AMD_PBC768_IDE: 1977 sc->sc_wdcdev.UDMA_cap = 5; 1978 break; 1979 default: 1980 sc->sc_wdcdev.UDMA_cap = 4; 1981 } 1982 sc->sc_wdcdev.set_modes = amd7x6_setup_channel; 1983 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1984 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1985 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN); 1986 1987 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable), 1988 DEBUG_PROBE); 1989 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1990 cp = &sc->pciide_channels[channel]; 1991 if (pciide_chansetup(sc, channel, interface) == 0) 1992 continue; 1993 1994 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) { 1995 printf("%s: %s channel ignored (disabled)\n", 1996 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1997 continue; 1998 } 1999 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2000 pciide_pci_intr); 2001 2002 if (pciide_chan_candisable(cp)) 2003 chanenable &= ~AMD7X6_CHAN_EN(channel); 2004 pciide_map_compat_intr(pa, cp, channel, interface); 2005 if (cp->hw_ok == 0) 2006 continue; 2007 2008 amd7x6_setup_channel(&cp->wdc_channel); 2009 } 2010 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN, 2011 chanenable); 2012 return; 2013 } 2014 2015 void 2016 amd7x6_setup_channel(chp) 2017 struct channel_softc *chp; 2018 { 2019 u_int32_t udmatim_reg, datatim_reg; 2020 u_int8_t idedma_ctl; 2021 int mode, drive; 2022 struct ata_drive_datas *drvp; 2023 struct pciide_channel *cp = (struct pciide_channel*)chp; 2024 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2025 #ifndef PCIIDE_AMD756_ENABLEDMA 2026 int rev = PCI_REVISION( 2027 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 2028 #endif 2029 2030 idedma_ctl = 0; 2031 datatim_reg 
= pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM); 2032 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA); 2033 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel); 2034 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel); 2035 2036 /* setup DMA if needed */ 2037 pciide_channel_dma_setup(cp); 2038 2039 for (drive = 0; drive < 2; drive++) { 2040 drvp = &chp->ch_drive[drive]; 2041 /* If no drive, skip */ 2042 if ((drvp->drive_flags & DRIVE) == 0) 2043 continue; 2044 /* add timing values, setup DMA if needed */ 2045 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2046 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2047 mode = drvp->PIO_mode; 2048 goto pio; 2049 } 2050 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2051 (drvp->drive_flags & DRIVE_UDMA)) { 2052 /* use Ultra/DMA */ 2053 drvp->drive_flags &= ~DRIVE_DMA; 2054 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) | 2055 AMD7X6_UDMA_EN_MTH(chp->channel, drive) | 2056 AMD7X6_UDMA_TIME(chp->channel, drive, 2057 amd7x6_udma_tim[drvp->UDMA_mode]); 2058 /* can use PIO timings, MW DMA unused */ 2059 mode = drvp->PIO_mode; 2060 } else { 2061 /* use Multiword DMA, but only if revision is OK */ 2062 drvp->drive_flags &= ~DRIVE_UDMA; 2063 #ifndef PCIIDE_AMD756_ENABLEDMA 2064 /* 2065 * The workaround doesn't seem to be necessary 2066 * with all drives, so it can be disabled by 2067 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 2068 * triggered. 2069 */ 2070 if (sc->sc_pp->ide_product == 2071 PCI_PRODUCT_AMD_PBC756_IDE && 2072 AMD756_CHIPREV_DISABLEDMA(rev)) { 2073 printf("%s:%d:%d: multi-word DMA disabled due " 2074 "to chip revision\n", 2075 sc->sc_wdcdev.sc_dev.dv_xname, 2076 chp->channel, drive); 2077 mode = drvp->PIO_mode; 2078 drvp->drive_flags &= ~DRIVE_DMA; 2079 goto pio; 2080 } 2081 #endif 2082 /* mode = min(pio, dma+2) */ 2083 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2084 mode = drvp->PIO_mode; 2085 else 2086 mode = drvp->DMA_mode + 2; 2087 } 2088 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2089 2090 pio: /* setup PIO mode */ 2091 if (mode <= 2) { 2092 drvp->DMA_mode = 0; 2093 drvp->PIO_mode = 0; 2094 mode = 0; 2095 } else { 2096 drvp->PIO_mode = mode; 2097 drvp->DMA_mode = mode - 2; 2098 } 2099 datatim_reg |= 2100 AMD7X6_DATATIM_PULSE(chp->channel, drive, 2101 amd7x6_pio_set[mode]) | 2102 AMD7X6_DATATIM_RECOV(chp->channel, drive, 2103 amd7x6_pio_rec[mode]); 2104 } 2105 if (idedma_ctl != 0) { 2106 /* Add software bits in status register */ 2107 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2108 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2109 idedma_ctl); 2110 } 2111 pciide_print_modes(cp); 2112 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg); 2113 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg); 2114 } 2115 2116 void 2117 apollo_chip_map(sc, pa) 2118 struct pciide_softc *sc; 2119 struct pci_attach_args *pa; 2120 { 2121 struct pciide_channel *cp; 2122 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2123 int channel; 2124 u_int32_t ideconf; 2125 bus_size_t cmdsize, ctlsize; 2126 pcitag_t pcib_tag; 2127 pcireg_t pcib_id, pcib_class; 2128 2129 if (pciide_chipen(sc, pa) == 0) 2130 return; 2131 /* get a PCI tag for the ISA bridge (function 0 of the same device) */ 2132 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 2133 /* and read ID and rev of the ISA bridge */ 2134 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG); 2135 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG); 2136 printf(": VIA Technologies "); 2137 switch (PCI_PRODUCT(pcib_id)) { 2138 
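/*
 * Key the controller's capabilities off the companion ISA bridge:
 * each case below records the highest UDMA mode the chip supports
 * (UDMA_cap 2 = Ultra/33, 4 = Ultra/66, 5 = Ultra/100, 0 = no UDMA).
 */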
case PCI_PRODUCT_VIATECH_VT82C586_ISA: 2139 printf("VT82C586 (Apollo VP) "); 2140 if(PCI_REVISION(pcib_class) >= 0x02) { 2141 printf("ATA33 controller\n"); 2142 sc->sc_wdcdev.UDMA_cap = 2; 2143 } else { 2144 printf("controller\n"); 2145 sc->sc_wdcdev.UDMA_cap = 0; 2146 } 2147 break; 2148 case PCI_PRODUCT_VIATECH_VT82C596A: 2149 printf("VT82C596A (Apollo Pro) "); 2150 if (PCI_REVISION(pcib_class) >= 0x12) { 2151 printf("ATA66 controller\n"); 2152 sc->sc_wdcdev.UDMA_cap = 4; 2153 } else { 2154 printf("ATA33 controller\n"); 2155 sc->sc_wdcdev.UDMA_cap = 2; 2156 } 2157 break; 2158 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 2159 printf("VT82C686A (Apollo KX133) "); 2160 if (PCI_REVISION(pcib_class) >= 0x40) { 2161 printf("ATA100 controller\n"); 2162 sc->sc_wdcdev.UDMA_cap = 5; 2163 } else { 2164 printf("ATA66 controller\n"); 2165 sc->sc_wdcdev.UDMA_cap = 4; 2166 } 2167 break; 2168 case PCI_PRODUCT_VIATECH_VT8233: 2169 printf("VT8233 ATA100 controller\n"); 2170 sc->sc_wdcdev.UDMA_cap = 5; 2171 break; 2172 default: 2173 printf("unknown ATA controller\n"); 2174 sc->sc_wdcdev.UDMA_cap = 0; 2175 } 2176 2177 printf("%s: bus-master DMA support present", 2178 sc->sc_wdcdev.sc_dev.dv_xname); 2179 pciide_mapreg_dma(sc, pa); 2180 printf("\n"); 2181 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2182 WDC_CAPABILITY_MODE; 2183 if (sc->sc_dma_ok) { 2184 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2185 sc->sc_wdcdev.irqack = pciide_irqack; 2186 if (sc->sc_wdcdev.UDMA_cap > 0) 2187 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2188 } 2189 sc->sc_wdcdev.PIO_cap = 4; 2190 sc->sc_wdcdev.DMA_cap = 2; 2191 sc->sc_wdcdev.set_modes = apollo_setup_channel; 2192 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2193 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2194 2195 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 2196 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2197 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 2198 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 2199 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2200 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 2201 DEBUG_PROBE); 2202 2203 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2204 cp = &sc->pciide_channels[channel]; 2205 if (pciide_chansetup(sc, channel, interface) == 0) 2206 continue; 2207 2208 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 2209 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 2210 printf("%s: %s channel ignored (disabled)\n", 2211 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2212 continue; 2213 } 2214 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2215 pciide_pci_intr); 2216 if (cp->hw_ok == 0) 2217 continue; 2218 if (pciide_chan_candisable(cp)) { 2219 ideconf &= ~APO_IDECONF_EN(channel); 2220 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 2221 ideconf); 2222 } 2223 pciide_map_compat_intr(pa, cp, channel, interface); 2224 2225 if (cp->hw_ok == 0) 2226 continue; 2227 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 2228 } 2229 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2230 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2231 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 2232 } 2233 2234 void 2235 apollo_setup_channel(chp) 2236 struct channel_softc *chp; 2237 { 2238 u_int32_t udmatim_reg, datatim_reg; 2239 u_int8_t idedma_ctl; 2240 int mode, drive; 2241 struct ata_drive_datas *drvp; 2242 struct pciide_channel *cp = (struct pciide_channel*)chp; 2243 struct pciide_softc 
*sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2244 2245 idedma_ctl = 0; 2246 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 2247 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 2248 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 2249 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 2250 2251 /* setup DMA if needed */ 2252 pciide_channel_dma_setup(cp); 2253 2254 for (drive = 0; drive < 2; drive++) { 2255 drvp = &chp->ch_drive[drive]; 2256 /* If no drive, skip */ 2257 if ((drvp->drive_flags & DRIVE) == 0) 2258 continue; 2259 /* add timing values, setup DMA if needed */ 2260 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2261 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2262 mode = drvp->PIO_mode; 2263 goto pio; 2264 } 2265 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2266 (drvp->drive_flags & DRIVE_UDMA)) { 2267 /* use Ultra/DMA */ 2268 drvp->drive_flags &= ~DRIVE_DMA; 2269 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 2270 APO_UDMA_EN_MTH(chp->channel, drive); 2271 if (sc->sc_wdcdev.UDMA_cap == 5) { 2272 /* 686b */ 2273 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 2274 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2275 drive, apollo_udma100_tim[drvp->UDMA_mode]); 2276 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 2277 /* 596b or 686a */ 2278 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 2279 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2280 drive, apollo_udma66_tim[drvp->UDMA_mode]); 2281 } else { 2282 /* 596a or 586b */ 2283 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2284 drive, apollo_udma33_tim[drvp->UDMA_mode]); 2285 } 2286 /* can use PIO timings, MW DMA unused */ 2287 mode = drvp->PIO_mode; 2288 } else { 2289 /* use Multiword DMA */ 2290 drvp->drive_flags &= ~DRIVE_UDMA; 2291 /* mode = min(pio, dma+2) */ 2292 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2293 mode = drvp->PIO_mode; 2294 else 2295 mode = drvp->DMA_mode + 2; 2296 } 2297 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2298 2299 pio: /* setup PIO mode */ 2300 if (mode <= 2) { 2301 drvp->DMA_mode = 0; 2302 drvp->PIO_mode = 0; 2303 mode = 0; 2304 } else { 2305 drvp->PIO_mode = mode; 2306 drvp->DMA_mode = mode - 2; 2307 } 2308 datatim_reg |= 2309 APO_DATATIM_PULSE(chp->channel, drive, 2310 apollo_pio_set[mode]) | 2311 APO_DATATIM_RECOV(chp->channel, drive, 2312 apollo_pio_rec[mode]); 2313 } 2314 if (idedma_ctl != 0) { 2315 /* Add software bits in status register */ 2316 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2317 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2318 idedma_ctl); 2319 } 2320 pciide_print_modes(cp); 2321 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 2322 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 2323 } 2324 2325 void 2326 cmd_channel_map(pa, sc, channel) 2327 struct pci_attach_args *pa; 2328 struct pciide_softc *sc; 2329 int channel; 2330 { 2331 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2332 bus_size_t cmdsize, ctlsize; 2333 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 2334 int interface, one_channel; 2335 2336 /* 2337 * The 0648/0649 can be told to identify as a RAID controller. 
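 * (it then attaches with a non-IDE PCI subclass, so the programming
 * interface byte in the class register can't be trusted)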
2338 * In this case, we have to fake the interface
2339 */
2340 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2341 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2342 PCIIDE_INTERFACE_SETTABLE(1);
2343 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2344 CMD_CONF_DSA1)
2345 interface |= PCIIDE_INTERFACE_PCI(0) |
2346 PCIIDE_INTERFACE_PCI(1);
2347 } else {
2348 interface = PCI_INTERFACE(pa->pa_class);
2349 }
2350
2351 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2352 cp->name = PCIIDE_CHANNEL_NAME(channel);
2353 cp->wdc_channel.channel = channel;
2354 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2355
2356 /*
2357 * Older CMD64X doesn't have independent channels
2358 */
2359 switch (sc->sc_pp->ide_product) {
2360 case PCI_PRODUCT_CMDTECH_649:
2361 one_channel = 0;
2362 break;
2363 default:
2364 one_channel = 1;
2365 break;
2366 }
2367
2368 if (channel > 0 && one_channel) {
2369 cp->wdc_channel.ch_queue =
2370 sc->pciide_channels[0].wdc_channel.ch_queue;
2371 } else {
2372 cp->wdc_channel.ch_queue =
2373 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2374 }
2375 if (cp->wdc_channel.ch_queue == NULL) {
2376 printf("%s %s channel: "
2377 "can't allocate memory for command queue\n",
2378 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2379 return;
2380 }
2381
2382 printf("%s: %s channel %s to %s mode\n",
2383 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2384 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2385 "configured" : "wired",
2386 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2387 "native-PCI" : "compatibility");
2388
2389 /*
2390 * With a CMD PCI64x, if we get here, the first channel is enabled:
2391 * there's no way to disable the first channel without disabling
2392 * the whole device.
2393 */
2394 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2395 printf("%s: %s channel ignored (disabled)\n",
2396 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2397 return;
2398 }
2399
2400 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2401 if (cp->hw_ok == 0)
2402 return;
2403 if (channel == 1) {
2404 if (pciide_chan_candisable(cp)) {
2405 ctrl &= ~CMD_CTRL_2PORT;
2406 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2407 CMD_CTRL, ctrl);
2408 }
2409 }
2410 pciide_map_compat_intr(pa, cp, channel, interface);
2411 }
2412
2413 int
2414 cmd_pci_intr(arg)
2415 void *arg;
2416 {
2417 struct pciide_softc *sc = arg;
2418 struct pciide_channel *cp;
2419 struct channel_softc *wdc_cp;
2420 int i, rv, crv;
2421 u_int32_t priirq, secirq;
2422
2423 rv = 0;
2424 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2425 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2426 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2427 cp = &sc->pciide_channels[i];
2428 wdc_cp = &cp->wdc_channel;
2429 /* If a compat channel, skip. */
2430 if (cp->compat)
2431 continue;
2432 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2433 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2434 crv = wdcintr(wdc_cp);
2435 if (crv == 0)
2436 printf("%s:%d: bogus intr\n",
2437 sc->sc_wdcdev.sc_dev.dv_xname, i);
2438 else
2439 rv = 1;
2440 }
2441 }
2442 return rv;
2443 }
2444
2445 void
2446 cmd_chip_map(sc, pa)
2447 struct pciide_softc *sc;
2448 struct pci_attach_args *pa;
2449 {
2450 int channel;
2451
2452 /*
2453 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2454 * and base address registers can be disabled at
2455 * the hardware level. In this case, the device is wired
2456 * in compat mode and its first channel is always enabled,
2457 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2458 * In fact, it seems that the first channel of the CMD PCI0640 2459 * can't be disabled. 2460 */ 2461 2462 #ifdef PCIIDE_CMD064x_DISABLE 2463 if (pciide_chipen(sc, pa) == 0) 2464 return; 2465 #endif 2466 2467 printf("%s: hardware does not support DMA\n", 2468 sc->sc_wdcdev.sc_dev.dv_xname); 2469 sc->sc_dma_ok = 0; 2470 2471 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2472 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2473 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 2474 2475 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2476 cmd_channel_map(pa, sc, channel); 2477 } 2478 } 2479 2480 void 2481 cmd0643_9_chip_map(sc, pa) 2482 struct pciide_softc *sc; 2483 struct pci_attach_args *pa; 2484 { 2485 struct pciide_channel *cp; 2486 int channel; 2487 pcireg_t rev = PCI_REVISION(pa->pa_class); 2488 2489 /* 2490 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2491 * and base adresses registers can be disabled at 2492 * hardware level. In this case, the device is wired 2493 * in compat mode and its first channel is always enabled, 2494 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2495 * In fact, it seems that the first channel of the CMD PCI0640 2496 * can't be disabled. 2497 */ 2498 2499 #ifdef PCIIDE_CMD064x_DISABLE 2500 if (pciide_chipen(sc, pa) == 0) 2501 return; 2502 #endif 2503 printf("%s: bus-master DMA support present", 2504 sc->sc_wdcdev.sc_dev.dv_xname); 2505 pciide_mapreg_dma(sc, pa); 2506 printf("\n"); 2507 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2508 WDC_CAPABILITY_MODE; 2509 if (sc->sc_dma_ok) { 2510 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2511 switch (sc->sc_pp->ide_product) { 2512 case PCI_PRODUCT_CMDTECH_649: 2513 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2514 sc->sc_wdcdev.UDMA_cap = 5; 2515 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2516 break; 2517 case PCI_PRODUCT_CMDTECH_648: 2518 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2519 sc->sc_wdcdev.UDMA_cap = 4; 2520 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2521 break; 2522 case PCI_PRODUCT_CMDTECH_646: 2523 if (rev >= CMD0646U2_REV) { 2524 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2525 sc->sc_wdcdev.UDMA_cap = 2; 2526 } else if (rev >= CMD0646U_REV) { 2527 /* 2528 * Linux's driver claims that the 646U is broken 2529 * with UDMA. 
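 * (UDMA is therefore left disabled here unless the kernel is built
 * with "options PCIIDE_CMD0646U_ENABLEUDMA")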
Only enable it if we know what we're 2530 * doing 2531 */ 2532 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 2533 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2534 sc->sc_wdcdev.UDMA_cap = 2; 2535 #endif 2536 /* explicitly disable UDMA */ 2537 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2538 CMD_UDMATIM(0), 0); 2539 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2540 CMD_UDMATIM(1), 0); 2541 } 2542 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2543 break; 2544 default: 2545 sc->sc_wdcdev.irqack = pciide_irqack; 2546 } 2547 } 2548 2549 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2550 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2551 sc->sc_wdcdev.PIO_cap = 4; 2552 sc->sc_wdcdev.DMA_cap = 2; 2553 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 2554 2555 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 2556 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2557 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2558 DEBUG_PROBE); 2559 2560 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2561 cp = &sc->pciide_channels[channel]; 2562 cmd_channel_map(pa, sc, channel); 2563 if (cp->hw_ok == 0) 2564 continue; 2565 cmd0643_9_setup_channel(&cp->wdc_channel); 2566 } 2567 /* 2568 * note - this also makes sure we clear the irq disable and reset 2569 * bits 2570 */ 2571 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 2572 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 2573 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2574 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2575 DEBUG_PROBE); 2576 } 2577 2578 void 2579 cmd0643_9_setup_channel(chp) 2580 struct channel_softc *chp; 2581 { 2582 struct ata_drive_datas *drvp; 2583 u_int8_t tim; 2584 u_int32_t idedma_ctl, udma_reg; 2585 int drive; 2586 struct pciide_channel *cp = (struct pciide_channel*)chp; 2587 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2588 2589 idedma_ctl = 0; 2590 /* setup DMA if needed */ 2591 pciide_channel_dma_setup(cp); 2592 2593 for (drive = 0; drive < 2; drive++) { 2594 drvp = &chp->ch_drive[drive]; 2595 /* If no drive, skip */ 2596 if ((drvp->drive_flags & DRIVE) == 0) 2597 continue; 2598 /* add timing values, setup DMA if needed */ 2599 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 2600 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 2601 if (drvp->drive_flags & DRIVE_UDMA) { 2602 /* UltraDMA on a 646U2, 0648 or 0649 */ 2603 drvp->drive_flags &= ~DRIVE_DMA; 2604 udma_reg = pciide_pci_read(sc->sc_pc, 2605 sc->sc_tag, CMD_UDMATIM(chp->channel)); 2606 if (drvp->UDMA_mode > 2 && 2607 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 2608 CMD_BICSR) & 2609 CMD_BICSR_80(chp->channel)) == 0) 2610 drvp->UDMA_mode = 2; 2611 if (drvp->UDMA_mode > 2) 2612 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 2613 else if (sc->sc_wdcdev.UDMA_cap > 2) 2614 udma_reg |= CMD_UDMATIM_UDMA33(drive); 2615 udma_reg |= CMD_UDMATIM_UDMA(drive); 2616 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 2617 CMD_UDMATIM_TIM_OFF(drive)); 2618 udma_reg |= 2619 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 2620 CMD_UDMATIM_TIM_OFF(drive)); 2621 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2622 CMD_UDMATIM(chp->channel), udma_reg); 2623 } else { 2624 /* 2625 * use Multiword DMA. 
2626 * Timings will be used for both PIO and DMA, 2627 * so adjust DMA mode if needed 2628 * if we have a 0646U2/8/9, turn off UDMA 2629 */ 2630 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2631 udma_reg = pciide_pci_read(sc->sc_pc, 2632 sc->sc_tag, 2633 CMD_UDMATIM(chp->channel)); 2634 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 2635 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2636 CMD_UDMATIM(chp->channel), 2637 udma_reg); 2638 } 2639 if (drvp->PIO_mode >= 3 && 2640 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 2641 drvp->DMA_mode = drvp->PIO_mode - 2; 2642 } 2643 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 2644 } 2645 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2646 } 2647 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2648 CMD_DATA_TIM(chp->channel, drive), tim); 2649 } 2650 if (idedma_ctl != 0) { 2651 /* Add software bits in status register */ 2652 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2653 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2654 idedma_ctl); 2655 } 2656 pciide_print_modes(cp); 2657 } 2658 2659 void 2660 cmd646_9_irqack(chp) 2661 struct channel_softc *chp; 2662 { 2663 u_int32_t priirq, secirq; 2664 struct pciide_channel *cp = (struct pciide_channel*)chp; 2665 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2666 2667 if (chp->channel == 0) { 2668 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2669 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 2670 } else { 2671 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2672 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 2673 } 2674 pciide_irqack(chp); 2675 } 2676 2677 void 2678 cy693_chip_map(sc, pa) 2679 struct pciide_softc *sc; 2680 struct pci_attach_args *pa; 2681 { 2682 struct pciide_channel *cp; 2683 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2684 bus_size_t cmdsize, ctlsize; 2685 2686 if (pciide_chipen(sc, pa) == 0) 2687 return; 2688 /* 2689 * this chip has 2 PCI IDE functions, one for primary and one for 2690 * secondary. 
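 * (function 1 drives the primary channel and function 2 the secondary,
 * which is what sc_cy_compatchan records below)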
So we need to call pciide_mapregs_compat() with 2691 * the real channel 2692 */ 2693 if (pa->pa_function == 1) { 2694 sc->sc_cy_compatchan = 0; 2695 } else if (pa->pa_function == 2) { 2696 sc->sc_cy_compatchan = 1; 2697 } else { 2698 printf("%s: unexpected PCI function %d\n", 2699 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2700 return; 2701 } 2702 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2703 printf("%s: bus-master DMA support present", 2704 sc->sc_wdcdev.sc_dev.dv_xname); 2705 pciide_mapreg_dma(sc, pa); 2706 } else { 2707 printf("%s: hardware does not support DMA", 2708 sc->sc_wdcdev.sc_dev.dv_xname); 2709 sc->sc_dma_ok = 0; 2710 } 2711 printf("\n"); 2712 2713 sc->sc_cy_handle = cy82c693_init(pa->pa_iot); 2714 if (sc->sc_cy_handle == NULL) { 2715 printf("%s: unable to map hyperCache control registers\n", 2716 sc->sc_wdcdev.sc_dev.dv_xname); 2717 sc->sc_dma_ok = 0; 2718 } 2719 2720 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2721 WDC_CAPABILITY_MODE; 2722 if (sc->sc_dma_ok) { 2723 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2724 sc->sc_wdcdev.irqack = pciide_irqack; 2725 } 2726 sc->sc_wdcdev.PIO_cap = 4; 2727 sc->sc_wdcdev.DMA_cap = 2; 2728 sc->sc_wdcdev.set_modes = cy693_setup_channel; 2729 2730 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2731 sc->sc_wdcdev.nchannels = 1; 2732 2733 /* Only one channel for this chip; if we are here it's enabled */ 2734 cp = &sc->pciide_channels[0]; 2735 sc->wdc_chanarray[0] = &cp->wdc_channel; 2736 cp->name = PCIIDE_CHANNEL_NAME(0); 2737 cp->wdc_channel.channel = 0; 2738 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2739 cp->wdc_channel.ch_queue = 2740 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2741 if (cp->wdc_channel.ch_queue == NULL) { 2742 printf("%s primary channel: " 2743 "can't allocate memory for command queue", 2744 sc->sc_wdcdev.sc_dev.dv_xname); 2745 return; 2746 } 2747 printf("%s: primary channel %s to ", 2748 sc->sc_wdcdev.sc_dev.dv_xname, 2749 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 
2750 "configured" : "wired"); 2751 if (interface & PCIIDE_INTERFACE_PCI(0)) { 2752 printf("native-PCI"); 2753 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 2754 pciide_pci_intr); 2755 } else { 2756 printf("compatibility"); 2757 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, 2758 &cmdsize, &ctlsize); 2759 } 2760 printf(" mode\n"); 2761 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2762 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2763 wdcattach(&cp->wdc_channel); 2764 if (pciide_chan_candisable(cp)) { 2765 pci_conf_write(sc->sc_pc, sc->sc_tag, 2766 PCI_COMMAND_STATUS_REG, 0); 2767 } 2768 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface); 2769 if (cp->hw_ok == 0) 2770 return; 2771 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 2772 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE); 2773 cy693_setup_channel(&cp->wdc_channel); 2774 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 2775 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 2776 } 2777 2778 void 2779 cy693_setup_channel(chp) 2780 struct channel_softc *chp; 2781 { 2782 struct ata_drive_datas *drvp; 2783 int drive; 2784 u_int32_t cy_cmd_ctrl; 2785 u_int32_t idedma_ctl; 2786 struct pciide_channel *cp = (struct pciide_channel*)chp; 2787 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2788 int dma_mode = -1; 2789 2790 cy_cmd_ctrl = idedma_ctl = 0; 2791 2792 /* setup DMA if needed */ 2793 pciide_channel_dma_setup(cp); 2794 2795 for (drive = 0; drive < 2; drive++) { 2796 drvp = &chp->ch_drive[drive]; 2797 /* If no drive, skip */ 2798 if ((drvp->drive_flags & DRIVE) == 0) 2799 continue; 2800 /* add timing values, setup DMA if needed */ 2801 if (drvp->drive_flags & DRIVE_DMA) { 2802 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2803 /* use Multiword DMA */ 2804 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 2805 dma_mode = drvp->DMA_mode; 2806 } 2807 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2808 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 2809 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2810 CY_CMD_CTRL_IOW_REC_OFF(drive)); 2811 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2812 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 2813 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2814 CY_CMD_CTRL_IOR_REC_OFF(drive)); 2815 } 2816 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 2817 chp->ch_drive[0].DMA_mode = dma_mode; 2818 chp->ch_drive[1].DMA_mode = dma_mode; 2819 2820 if (dma_mode == -1) 2821 dma_mode = 0; 2822 2823 if (sc->sc_cy_handle != NULL) { 2824 /* Note: `multiple' is implied. */ 2825 cy82c693_write(sc->sc_cy_handle, 2826 (sc->sc_cy_compatchan == 0) ? 
2827 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 2828 } 2829 2830 pciide_print_modes(cp); 2831 2832 if (idedma_ctl != 0) { 2833 /* Add software bits in status register */ 2834 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2835 IDEDMA_CTL, idedma_ctl); 2836 } 2837 } 2838 2839 static int 2840 sis_hostbr_match(pa) 2841 struct pci_attach_args *pa; 2842 { 2843 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) && 2844 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) || 2845 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) || 2846 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) || 2847 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735))); 2848 } 2849 2850 void 2851 sis_chip_map(sc, pa) 2852 struct pciide_softc *sc; 2853 struct pci_attach_args *pa; 2854 { 2855 struct pciide_channel *cp; 2856 int channel; 2857 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 2858 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2859 pcireg_t rev = PCI_REVISION(pa->pa_class); 2860 bus_size_t cmdsize, ctlsize; 2861 pcitag_t pchb_tag; 2862 pcireg_t pchb_id, pchb_class; 2863 2864 if (pciide_chipen(sc, pa) == 0) 2865 return; 2866 printf("%s: bus-master DMA support present", 2867 sc->sc_wdcdev.sc_dev.dv_xname); 2868 pciide_mapreg_dma(sc, pa); 2869 printf("\n"); 2870 2871 /* get a PCI tag for the host bridge (function 0 of the same device) */ 2872 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 2873 /* and read ID and rev of the ISA bridge */ 2874 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG); 2875 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG); 2876 2877 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2878 WDC_CAPABILITY_MODE; 2879 if (sc->sc_dma_ok) { 2880 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2881 sc->sc_wdcdev.irqack = pciide_irqack; 2882 /* 2883 * controllers associated to a rev 0x2 530 Host to PCI Bridge 2884 * have problems with UDMA (info provided by Christos) 2885 */ 2886 if (rev >= 0xd0 && 2887 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB || 2888 PCI_REVISION(pchb_class) >= 0x03)) 2889 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2890 } 2891 2892 sc->sc_wdcdev.PIO_cap = 4; 2893 sc->sc_wdcdev.DMA_cap = 2; 2894 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) 2895 /* 2896 * Use UDMA/100 on SiS 735 chipset and UDMA/33 on other 2897 * chipsets. 2898 */ 2899 sc->sc_wdcdev.UDMA_cap = 2900 pci_find_device(pa, sis_hostbr_match) ? 
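/* a SiS 645/650/730/735 host bridge was found: Ultra/100, otherwise Ultra/33 */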
5 : 2; 2901 sc->sc_wdcdev.set_modes = sis_setup_channel; 2902 2903 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2904 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2905 2906 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 2907 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 2908 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE); 2909 2910 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2911 cp = &sc->pciide_channels[channel]; 2912 if (pciide_chansetup(sc, channel, interface) == 0) 2913 continue; 2914 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 2915 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 2916 printf("%s: %s channel ignored (disabled)\n", 2917 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2918 continue; 2919 } 2920 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2921 pciide_pci_intr); 2922 if (cp->hw_ok == 0) 2923 continue; 2924 if (pciide_chan_candisable(cp)) { 2925 if (channel == 0) 2926 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 2927 else 2928 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 2929 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 2930 sis_ctr0); 2931 } 2932 pciide_map_compat_intr(pa, cp, channel, interface); 2933 if (cp->hw_ok == 0) 2934 continue; 2935 sis_setup_channel(&cp->wdc_channel); 2936 } 2937 } 2938 2939 void 2940 sis_setup_channel(chp) 2941 struct channel_softc *chp; 2942 { 2943 struct ata_drive_datas *drvp; 2944 int drive; 2945 u_int32_t sis_tim; 2946 u_int32_t idedma_ctl; 2947 struct pciide_channel *cp = (struct pciide_channel*)chp; 2948 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2949 2950 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 2951 "channel %d 0x%x\n", chp->channel, 2952 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 2953 DEBUG_PROBE); 2954 sis_tim = 0; 2955 idedma_ctl = 0; 2956 /* setup DMA if needed */ 2957 pciide_channel_dma_setup(cp); 2958 2959 for (drive = 0; drive < 2; drive++) { 2960 drvp = &chp->ch_drive[drive]; 2961 /* If no drive, skip */ 2962 if ((drvp->drive_flags & DRIVE) == 0) 2963 continue; 2964 /* add timing values, setup DMA if needed */ 2965 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 2966 (drvp->drive_flags & DRIVE_UDMA) == 0) 2967 goto pio; 2968 2969 if (drvp->drive_flags & DRIVE_UDMA) { 2970 /* use Ultra/DMA */ 2971 drvp->drive_flags &= ~DRIVE_DMA; 2972 sis_tim |= sis_udma_tim[drvp->UDMA_mode] << 2973 SIS_TIM_UDMA_TIME_OFF(drive); 2974 sis_tim |= SIS_TIM_UDMA_EN(drive); 2975 } else { 2976 /* 2977 * use Multiword DMA 2978 * Timings will be used for both PIO and DMA, 2979 * so adjust DMA mode if needed 2980 */ 2981 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 2982 drvp->PIO_mode = drvp->DMA_mode + 2; 2983 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 2984 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
2985 drvp->PIO_mode - 2 : 0; 2986 if (drvp->DMA_mode == 0) 2987 drvp->PIO_mode = 0; 2988 } 2989 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2990 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] << 2991 SIS_TIM_ACT_OFF(drive); 2992 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 2993 SIS_TIM_REC_OFF(drive); 2994 } 2995 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 2996 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 2997 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 2998 if (idedma_ctl != 0) { 2999 /* Add software bits in status register */ 3000 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3001 IDEDMA_CTL, idedma_ctl); 3002 } 3003 pciide_print_modes(cp); 3004 } 3005 3006 void 3007 acer_chip_map(sc, pa) 3008 struct pciide_softc *sc; 3009 struct pci_attach_args *pa; 3010 { 3011 struct pciide_channel *cp; 3012 int channel; 3013 pcireg_t cr, interface; 3014 bus_size_t cmdsize, ctlsize; 3015 pcireg_t rev = PCI_REVISION(pa->pa_class); 3016 3017 if (pciide_chipen(sc, pa) == 0) 3018 return; 3019 printf("%s: bus-master DMA support present", 3020 sc->sc_wdcdev.sc_dev.dv_xname); 3021 pciide_mapreg_dma(sc, pa); 3022 printf("\n"); 3023 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3024 WDC_CAPABILITY_MODE; 3025 if (sc->sc_dma_ok) { 3026 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 3027 if (rev >= 0x20) { 3028 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3029 if (rev >= 0xC4) 3030 sc->sc_wdcdev.UDMA_cap = 5; 3031 else if (rev >= 0xC2) 3032 sc->sc_wdcdev.UDMA_cap = 4; 3033 else 3034 sc->sc_wdcdev.UDMA_cap = 2; 3035 } 3036 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3037 sc->sc_wdcdev.irqack = pciide_irqack; 3038 } 3039 3040 sc->sc_wdcdev.PIO_cap = 4; 3041 sc->sc_wdcdev.DMA_cap = 2; 3042 sc->sc_wdcdev.set_modes = acer_setup_channel; 3043 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3044 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3045 3046 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 3047 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 3048 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 3049 3050 /* Enable "microsoft register bits" R/W. 
*/ 3051 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 3052 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 3053 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 3054 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 3055 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 3056 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 3057 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 3058 ~ACER_CHANSTATUSREGS_RO); 3059 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 3060 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 3061 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 3062 /* Don't use cr, re-read the real register content instead */ 3063 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 3064 PCI_CLASS_REG)); 3065 3066 /* From linux: enable "Cable Detection" */ 3067 if (rev >= 0xC2) { 3068 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 3069 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 3070 | ACER_0x4B_CDETECT); 3071 } 3072 3073 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3074 cp = &sc->pciide_channels[channel]; 3075 if (pciide_chansetup(sc, channel, interface) == 0) 3076 continue; 3077 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 3078 printf("%s: %s channel ignored (disabled)\n", 3079 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3080 continue; 3081 } 3082 /* newer controllers seems to lack the ACER_CHIDS. Sigh */ 3083 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3084 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr); 3085 if (cp->hw_ok == 0) 3086 continue; 3087 if (pciide_chan_candisable(cp)) { 3088 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 3089 pci_conf_write(sc->sc_pc, sc->sc_tag, 3090 PCI_CLASS_REG, cr); 3091 } 3092 pciide_map_compat_intr(pa, cp, channel, interface); 3093 acer_setup_channel(&cp->wdc_channel); 3094 } 3095 } 3096 3097 void 3098 acer_setup_channel(chp) 3099 struct channel_softc *chp; 3100 { 3101 struct ata_drive_datas *drvp; 3102 int drive; 3103 u_int32_t acer_fifo_udma; 3104 u_int32_t idedma_ctl; 3105 struct pciide_channel *cp = (struct pciide_channel*)chp; 3106 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3107 3108 idedma_ctl = 0; 3109 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 3110 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 3111 acer_fifo_udma), DEBUG_PROBE); 3112 /* setup DMA if needed */ 3113 pciide_channel_dma_setup(cp); 3114 3115 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 3116 DRIVE_UDMA) { /* check 80 pins cable */ 3117 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 3118 ACER_0x4A_80PIN(chp->channel)) { 3119 if (chp->ch_drive[0].UDMA_mode > 2) 3120 chp->ch_drive[0].UDMA_mode = 2; 3121 if (chp->ch_drive[1].UDMA_mode > 2) 3122 chp->ch_drive[1].UDMA_mode = 2; 3123 } 3124 } 3125 3126 for (drive = 0; drive < 2; drive++) { 3127 drvp = &chp->ch_drive[drive]; 3128 /* If no drive, skip */ 3129 if ((drvp->drive_flags & DRIVE) == 0) 3130 continue; 3131 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 3132 "channel %d drive %d 0x%x\n", chp->channel, drive, 3133 pciide_pci_read(sc->sc_pc, sc->sc_tag, 3134 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 3135 /* clear FIFO/DMA mode */ 3136 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 3137 ACER_UDMA_EN(chp->channel, drive) | 3138 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 3139 3140 /* add timing values, setup DMA if needed */ 3141 if ((drvp->drive_flags & 
DRIVE_DMA) == 0 && 3142 (drvp->drive_flags & DRIVE_UDMA) == 0) { 3143 acer_fifo_udma |= 3144 ACER_FTH_OPL(chp->channel, drive, 0x1); 3145 goto pio; 3146 } 3147 3148 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 3149 if (drvp->drive_flags & DRIVE_UDMA) { 3150 /* use Ultra/DMA */ 3151 drvp->drive_flags &= ~DRIVE_DMA; 3152 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 3153 acer_fifo_udma |= 3154 ACER_UDMA_TIM(chp->channel, drive, 3155 acer_udma[drvp->UDMA_mode]); 3156 /* XXX disable if one drive < UDMA3 ? */ 3157 if (drvp->UDMA_mode >= 3) { 3158 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3159 ACER_0x4B, 3160 pciide_pci_read(sc->sc_pc, sc->sc_tag, 3161 ACER_0x4B) | ACER_0x4B_UDMA66); 3162 } 3163 } else { 3164 /* 3165 * use Multiword DMA 3166 * Timings will be used for both PIO and DMA, 3167 * so adjust DMA mode if needed 3168 */ 3169 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3170 drvp->PIO_mode = drvp->DMA_mode + 2; 3171 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3172 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 3173 drvp->PIO_mode - 2 : 0; 3174 if (drvp->DMA_mode == 0) 3175 drvp->PIO_mode = 0; 3176 } 3177 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3178 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 3179 ACER_IDETIM(chp->channel, drive), 3180 acer_pio[drvp->PIO_mode]); 3181 } 3182 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 3183 acer_fifo_udma), DEBUG_PROBE); 3184 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 3185 if (idedma_ctl != 0) { 3186 /* Add software bits in status register */ 3187 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3188 IDEDMA_CTL, idedma_ctl); 3189 } 3190 pciide_print_modes(cp); 3191 } 3192 3193 int 3194 acer_pci_intr(arg) 3195 void *arg; 3196 { 3197 struct pciide_softc *sc = arg; 3198 struct pciide_channel *cp; 3199 struct channel_softc *wdc_cp; 3200 int i, rv, crv; 3201 u_int32_t chids; 3202 3203 rv = 0; 3204 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 3205 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3206 cp = &sc->pciide_channels[i]; 3207 wdc_cp = &cp->wdc_channel; 3208 /* If a compat channel skip. */ 3209 if (cp->compat) 3210 continue; 3211 if (chids & ACER_CHIDS_INT(i)) { 3212 crv = wdcintr(wdc_cp); 3213 if (crv == 0) 3214 printf("%s:%d: bogus intr\n", 3215 sc->sc_wdcdev.sc_dev.dv_xname, i); 3216 else 3217 rv = 1; 3218 } 3219 } 3220 return rv; 3221 } 3222 3223 void 3224 hpt_chip_map(sc, pa) 3225 struct pciide_softc *sc; 3226 struct pci_attach_args *pa; 3227 { 3228 struct pciide_channel *cp; 3229 int i, compatchan, revision; 3230 pcireg_t interface; 3231 bus_size_t cmdsize, ctlsize; 3232 3233 if (pciide_chipen(sc, pa) == 0) 3234 return; 3235 revision = PCI_REVISION(pa->pa_class); 3236 printf(": Triones/Highpoint "); 3237 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 3238 printf("HPT374 IDE Controller\n"); 3239 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 3240 if (revision == HPT370_REV) 3241 printf("HPT370 IDE Controller\n"); 3242 else if (revision == HPT370A_REV) 3243 printf("HPT370A IDE Controller\n"); 3244 else if (revision == HPT366_REV) 3245 printf("HPT366 IDE Controller\n"); 3246 else 3247 printf("unknown HPT IDE controller rev %d\n", revision); 3248 } else 3249 printf("unknown HPT IDE controller 0x%x\n", 3250 sc->sc_pp->ide_product); 3251 3252 /* 3253 * when the chip is in native mode it identifies itself as a 3254 * 'misc mass storage'. Fake interface in this case. 
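 * The faked value advertises bus-master DMA and native-PCI mode for
 * channel 0, plus channel 1 on the HPT370/370A/374.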
3255 */ 3256 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3257 interface = PCI_INTERFACE(pa->pa_class); 3258 } else { 3259 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3260 PCIIDE_INTERFACE_PCI(0); 3261 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3262 (revision == HPT370_REV || revision == HPT370A_REV)) || 3263 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 3264 interface |= PCIIDE_INTERFACE_PCI(1); 3265 } 3266 3267 printf("%s: bus-master DMA support present", 3268 sc->sc_wdcdev.sc_dev.dv_xname); 3269 pciide_mapreg_dma(sc, pa); 3270 printf("\n"); 3271 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3272 WDC_CAPABILITY_MODE; 3273 if (sc->sc_dma_ok) { 3274 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3275 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3276 sc->sc_wdcdev.irqack = pciide_irqack; 3277 } 3278 sc->sc_wdcdev.PIO_cap = 4; 3279 sc->sc_wdcdev.DMA_cap = 2; 3280 3281 sc->sc_wdcdev.set_modes = hpt_setup_channel; 3282 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3283 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3284 revision == HPT366_REV) { 3285 sc->sc_wdcdev.UDMA_cap = 4; 3286 /* 3287 * The 366 has 2 PCI IDE functions, one for primary and one 3288 * for secondary. So we need to call pciide_mapregs_compat() 3289 * with the real channel 3290 */ 3291 if (pa->pa_function == 0) { 3292 compatchan = 0; 3293 } else if (pa->pa_function == 1) { 3294 compatchan = 1; 3295 } else { 3296 printf("%s: unexpected PCI function %d\n", 3297 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 3298 return; 3299 } 3300 sc->sc_wdcdev.nchannels = 1; 3301 } else { 3302 sc->sc_wdcdev.nchannels = 2; 3303 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 3304 sc->sc_wdcdev.UDMA_cap = 6; 3305 else 3306 sc->sc_wdcdev.UDMA_cap = 5; 3307 } 3308 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3309 cp = &sc->pciide_channels[i]; 3310 if (sc->sc_wdcdev.nchannels > 1) { 3311 compatchan = i; 3312 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 3313 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 3314 printf("%s: %s channel ignored (disabled)\n", 3315 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3316 continue; 3317 } 3318 } 3319 if (pciide_chansetup(sc, i, interface) == 0) 3320 continue; 3321 if (interface & PCIIDE_INTERFACE_PCI(i)) { 3322 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 3323 &ctlsize, hpt_pci_intr); 3324 } else { 3325 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 3326 &cmdsize, &ctlsize); 3327 } 3328 if (cp->hw_ok == 0) 3329 return; 3330 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 3331 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 3332 wdcattach(&cp->wdc_channel); 3333 hpt_setup_channel(&cp->wdc_channel); 3334 } 3335 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3336 (revision == HPT370_REV || revision == HPT370A_REV)) || 3337 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 3338 /* 3339 * HPT370_REV and highter has a bit to disable interrupts, 3340 * make sure to clear it 3341 */ 3342 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 3343 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 3344 ~HPT_CSEL_IRQDIS); 3345 } 3346 /* set clocks, etc (mandatory on 374, optional otherwise) */ 3347 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 3348 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 3349 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 3350 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 3351 return; 3352 } 3353 3354 void 3355 hpt_setup_channel(chp) 3356 struct 
channel_softc *chp; 3357 { 3358 struct ata_drive_datas *drvp; 3359 int drive; 3360 int cable; 3361 u_int32_t before, after; 3362 u_int32_t idedma_ctl; 3363 struct pciide_channel *cp = (struct pciide_channel*)chp; 3364 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3365 3366 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 3367 3368 /* setup DMA if needed */ 3369 pciide_channel_dma_setup(cp); 3370 3371 idedma_ctl = 0; 3372 3373 /* Per drive settings */ 3374 for (drive = 0; drive < 2; drive++) { 3375 drvp = &chp->ch_drive[drive]; 3376 /* If no drive, skip */ 3377 if ((drvp->drive_flags & DRIVE) == 0) 3378 continue; 3379 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 3380 HPT_IDETIM(chp->channel, drive)); 3381 3382 /* add timing values, setup DMA if needed */ 3383 if (drvp->drive_flags & DRIVE_UDMA) { 3384 /* use Ultra/DMA */ 3385 drvp->drive_flags &= ~DRIVE_DMA; 3386 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 3387 drvp->UDMA_mode > 2) 3388 drvp->UDMA_mode = 2; 3389 after = (sc->sc_wdcdev.nchannels == 2) ? 3390 ( (sc->sc_wdcdev.UDMA_cap == 6) ? 3391 hpt374_udma[drvp->UDMA_mode] : 3392 hpt370_udma[drvp->UDMA_mode]) : 3393 hpt366_udma[drvp->UDMA_mode]; 3394 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3395 } else if (drvp->drive_flags & DRIVE_DMA) { 3396 /* 3397 * use Multiword DMA. 3398 * Timings will be used for both PIO and DMA, so adjust 3399 * DMA mode if needed 3400 */ 3401 if (drvp->PIO_mode >= 3 && 3402 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3403 drvp->DMA_mode = drvp->PIO_mode - 2; 3404 } 3405 after = (sc->sc_wdcdev.nchannels == 2) ? 3406 ( (sc->sc_wdcdev.UDMA_cap == 6) ? 3407 hpt374_dma[drvp->DMA_mode] : 3408 hpt370_dma[drvp->DMA_mode]) : 3409 hpt366_dma[drvp->DMA_mode]; 3410 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3411 } else { 3412 /* PIO only */ 3413 after = (sc->sc_wdcdev.nchannels == 2) ? 3414 ( (sc->sc_wdcdev.UDMA_cap == 6) ? 
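/* UDMA_cap 6 here means HPT374, otherwise HPT370/370A */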
3415 hpt374_pio[drvp->PIO_mode] : 3416 hpt370_pio[drvp->PIO_mode]) : 3417 hpt366_pio[drvp->PIO_mode]; 3418 } 3419 pci_conf_write(sc->sc_pc, sc->sc_tag, 3420 HPT_IDETIM(chp->channel, drive), after); 3421 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 3422 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname, 3423 after, before), DEBUG_PROBE); 3424 } 3425 if (idedma_ctl != 0) { 3426 /* Add software bits in status register */ 3427 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3428 IDEDMA_CTL, idedma_ctl); 3429 } 3430 pciide_print_modes(cp); 3431 } 3432 3433 int 3434 hpt_pci_intr(arg) 3435 void *arg; 3436 { 3437 struct pciide_softc *sc = arg; 3438 struct pciide_channel *cp; 3439 struct channel_softc *wdc_cp; 3440 int rv = 0; 3441 int dmastat, i, crv; 3442 3443 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3444 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3445 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 3446 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 3447 IDEDMA_CTL_INTR) 3448 continue; 3449 cp = &sc->pciide_channels[i]; 3450 wdc_cp = &cp->wdc_channel; 3451 crv = wdcintr(wdc_cp); 3452 if (crv == 0) { 3453 printf("%s:%d: bogus intr\n", 3454 sc->sc_wdcdev.sc_dev.dv_xname, i); 3455 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3456 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 3457 } else 3458 rv = 1; 3459 } 3460 return rv; 3461 } 3462 3463 3464 /* Macros to test product */ 3465 #define PDC_IS_262(sc) \ 3466 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \ 3467 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3468 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \ 3469 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \ 3470 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \ 3471 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133) 3472 #define PDC_IS_265(sc) \ 3473 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3474 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \ 3475 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \ 3476 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \ 3477 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133) 3478 #define PDC_IS_268(sc) \ 3479 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \ 3480 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \ 3481 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133) 3482 3483 void 3484 pdc202xx_chip_map(sc, pa) 3485 struct pciide_softc *sc; 3486 struct pci_attach_args *pa; 3487 { 3488 struct pciide_channel *cp; 3489 int channel; 3490 pcireg_t interface, st, mode; 3491 bus_size_t cmdsize, ctlsize; 3492 3493 if (!PDC_IS_268(sc)) { 3494 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3495 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 3496 st), DEBUG_PROBE); 3497 } 3498 if (pciide_chipen(sc, pa) == 0) 3499 return; 3500 3501 /* turn off RAID mode */ 3502 if (!PDC_IS_268(sc)) 3503 st &= ~PDC2xx_STATE_IDERAID; 3504 3505 /* 3506 * can't rely on the PCI_CLASS_REG content if the chip was in raid 3507 * mode. 
We have to fake interface 3508 */ 3509 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 3510 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 3511 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3512 3513 printf("%s: bus-master DMA support present", 3514 sc->sc_wdcdev.sc_dev.dv_xname); 3515 pciide_mapreg_dma(sc, pa); 3516 printf("\n"); 3517 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3518 WDC_CAPABILITY_MODE; 3519 if (sc->sc_dma_ok) { 3520 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3521 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3522 sc->sc_wdcdev.irqack = pciide_irqack; 3523 } 3524 sc->sc_wdcdev.PIO_cap = 4; 3525 sc->sc_wdcdev.DMA_cap = 2; 3526 if (PDC_IS_265(sc)) 3527 sc->sc_wdcdev.UDMA_cap = 5; 3528 else if (PDC_IS_262(sc)) 3529 sc->sc_wdcdev.UDMA_cap = 4; 3530 else 3531 sc->sc_wdcdev.UDMA_cap = 2; 3532 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 3533 pdc20268_setup_channel : pdc202xx_setup_channel; 3534 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3535 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3536 3537 if (!PDC_IS_268(sc)) { 3538 /* setup failsafe defaults */ 3539 mode = 0; 3540 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 3541 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 3542 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 3543 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 3544 for (channel = 0; 3545 channel < sc->sc_wdcdev.nchannels; 3546 channel++) { 3547 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 3548 "drive 0 initial timings 0x%x, now 0x%x\n", 3549 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 3550 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 3551 DEBUG_PROBE); 3552 pci_conf_write(sc->sc_pc, sc->sc_tag, 3553 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 3554 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 3555 "drive 1 initial timings 0x%x, now 0x%x\n", 3556 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 3557 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 3558 pci_conf_write(sc->sc_pc, sc->sc_tag, 3559 PDC2xx_TIM(channel, 1), mode); 3560 } 3561 3562 mode = PDC2xx_SCR_DMA; 3563 if (PDC_IS_262(sc)) { 3564 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 3565 } else { 3566 /* the BIOS set it up this way */ 3567 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 3568 } 3569 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 3570 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 3571 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 3572 "now 0x%x\n", 3573 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3574 PDC2xx_SCR), 3575 mode), DEBUG_PROBE); 3576 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3577 PDC2xx_SCR, mode); 3578 3579 /* controller initial state register is OK even without BIOS */ 3580 /* Set DMA mode to IDE DMA compatibility */ 3581 mode = 3582 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 3583 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 3584 DEBUG_PROBE); 3585 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 3586 mode | 0x1); 3587 mode = 3588 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 3589 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 3590 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 3591 mode | 0x1); 3592 } 3593 3594 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3595 cp = &sc->pciide_channels[channel]; 3596 if (pciide_chansetup(sc, channel, interface) == 0) 3597 continue; 3598 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 
3599 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 3600 printf("%s: %s channel ignored (disabled)\n", 3601 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3602 continue; 3603 } 3604 if (PDC_IS_265(sc)) 3605 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3606 pdc20265_pci_intr); 3607 else 3608 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3609 pdc202xx_pci_intr); 3610 if (cp->hw_ok == 0) 3611 continue; 3612 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) 3613 st &= ~(PDC_IS_262(sc) ? 3614 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 3615 pciide_map_compat_intr(pa, cp, channel, interface); 3616 pdc202xx_setup_channel(&cp->wdc_channel); 3617 } 3618 if (!PDC_IS_268(sc)) { 3619 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 3620 "0x%x\n", st), DEBUG_PROBE); 3621 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 3622 } 3623 return; 3624 } 3625 3626 void 3627 pdc202xx_setup_channel(chp) 3628 struct channel_softc *chp; 3629 { 3630 struct ata_drive_datas *drvp; 3631 int drive; 3632 pcireg_t mode, st; 3633 u_int32_t idedma_ctl, scr, atapi; 3634 struct pciide_channel *cp = (struct pciide_channel*)chp; 3635 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3636 int channel = chp->channel; 3637 3638 /* setup DMA if needed */ 3639 pciide_channel_dma_setup(cp); 3640 3641 idedma_ctl = 0; 3642 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 3643 sc->sc_wdcdev.sc_dev.dv_xname, 3644 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 3645 DEBUG_PROBE); 3646 3647 /* Per channel settings */ 3648 if (PDC_IS_262(sc)) { 3649 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3650 PDC262_U66); 3651 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3652 /* Trim UDMA mode */ 3653 if ((st & PDC262_STATE_80P(channel)) != 0 || 3654 (chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3655 chp->ch_drive[0].UDMA_mode <= 2) || 3656 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3657 chp->ch_drive[1].UDMA_mode <= 2)) { 3658 if (chp->ch_drive[0].UDMA_mode > 2) 3659 chp->ch_drive[0].UDMA_mode = 2; 3660 if (chp->ch_drive[1].UDMA_mode > 2) 3661 chp->ch_drive[1].UDMA_mode = 2; 3662 } 3663 /* Set U66 if needed */ 3664 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3665 chp->ch_drive[0].UDMA_mode > 2) || 3666 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3667 chp->ch_drive[1].UDMA_mode > 2)) 3668 scr |= PDC262_U66_EN(channel); 3669 else 3670 scr &= ~PDC262_U66_EN(channel); 3671 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3672 PDC262_U66, scr); 3673 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 3674 sc->sc_wdcdev.sc_dev.dv_xname, channel, 3675 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3676 PDC262_ATAPI(channel))), DEBUG_PROBE); 3677 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 3678 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 3679 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3680 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3681 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 3682 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3683 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3684 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 3685 atapi = 0; 3686 else 3687 atapi = PDC262_ATAPI_UDMA; 3688 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3689 PDC262_ATAPI(channel), atapi); 3690 } 3691 } 3692 for (drive = 0; drive < 2; drive++) { 3693 drvp = &chp->ch_drive[drive]; 3694 /* If no drive, skip */ 3695 if ((drvp->drive_flags & DRIVE) == 0) 3696 continue; 3697 mode = 0; 3698 if 
(drvp->drive_flags & DRIVE_UDMA) { 3699 /* use Ultra/DMA */ 3700 drvp->drive_flags &= ~DRIVE_DMA; 3701 mode = PDC2xx_TIM_SET_MB(mode, 3702 pdc2xx_udma_mb[drvp->UDMA_mode]); 3703 mode = PDC2xx_TIM_SET_MC(mode, 3704 pdc2xx_udma_mc[drvp->UDMA_mode]); 3705 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3706 } else if (drvp->drive_flags & DRIVE_DMA) { 3707 mode = PDC2xx_TIM_SET_MB(mode, 3708 pdc2xx_dma_mb[drvp->DMA_mode]); 3709 mode = PDC2xx_TIM_SET_MC(mode, 3710 pdc2xx_dma_mc[drvp->DMA_mode]); 3711 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3712 } else { 3713 mode = PDC2xx_TIM_SET_MB(mode, 3714 pdc2xx_dma_mb[0]); 3715 mode = PDC2xx_TIM_SET_MC(mode, 3716 pdc2xx_dma_mc[0]); 3717 } 3718 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 3719 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 3720 if (drvp->drive_flags & DRIVE_ATA) 3721 mode |= PDC2xx_TIM_PRE; 3722 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 3723 if (drvp->PIO_mode >= 3) { 3724 mode |= PDC2xx_TIM_IORDY; 3725 if (drive == 0) 3726 mode |= PDC2xx_TIM_IORDYp; 3727 } 3728 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 3729 "timings 0x%x\n", 3730 sc->sc_wdcdev.sc_dev.dv_xname, 3731 chp->channel, drive, mode), DEBUG_PROBE); 3732 pci_conf_write(sc->sc_pc, sc->sc_tag, 3733 PDC2xx_TIM(chp->channel, drive), mode); 3734 } 3735 if (idedma_ctl != 0) { 3736 /* Add software bits in status register */ 3737 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3738 IDEDMA_CTL, idedma_ctl); 3739 } 3740 pciide_print_modes(cp); 3741 } 3742 3743 void 3744 pdc20268_setup_channel(chp) 3745 struct channel_softc *chp; 3746 { 3747 struct ata_drive_datas *drvp; 3748 int drive; 3749 u_int32_t idedma_ctl; 3750 struct pciide_channel *cp = (struct pciide_channel*)chp; 3751 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3752 int u100; 3753 3754 /* setup DMA if needed */ 3755 pciide_channel_dma_setup(cp); 3756 3757 idedma_ctl = 0; 3758 3759 /* I don't know what this is for, FreeBSD does it ... */ 3760 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3761 IDEDMA_CMD + 0x1, 0x0b); 3762 3763 /* 3764 * I don't know what this is for; FreeBSD checks this ... this is not 3765 * cable type detect. 3766 */ 3767 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3768 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1; 3769 3770 for (drive = 0; drive < 2; drive++) { 3771 drvp = &chp->ch_drive[drive]; 3772 /* If no drive, skip */ 3773 if ((drvp->drive_flags & DRIVE) == 0) 3774 continue; 3775 if (drvp->drive_flags & DRIVE_UDMA) { 3776 /* use Ultra/DMA */ 3777 drvp->drive_flags &= ~DRIVE_DMA; 3778 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3779 if (drvp->UDMA_mode > 2 && u100 == 0) 3780 drvp->UDMA_mode = 2; 3781 } else if (drvp->drive_flags & DRIVE_DMA) { 3782 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3783 } 3784 } 3785 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */ 3786 if (idedma_ctl != 0) { 3787 /* Add software bits in status register */ 3788 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3789 IDEDMA_CTL, idedma_ctl); 3790 } 3791 pciide_print_modes(cp); 3792 } 3793 3794 int 3795 pdc202xx_pci_intr(arg) 3796 void *arg; 3797 { 3798 struct pciide_softc *sc = arg; 3799 struct pciide_channel *cp; 3800 struct channel_softc *wdc_cp; 3801 int i, rv, crv; 3802 u_int32_t scr; 3803 3804 rv = 0; 3805 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 3806 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3807 cp = &sc->pciide_channels[i]; 3808 wdc_cp = &cp->wdc_channel; 3809 /* If a compat channel skip. 
*/ 3810 if (cp->compat) 3811 continue; 3812 if (scr & PDC2xx_SCR_INT(i)) { 3813 crv = wdcintr(wdc_cp); 3814 if (crv == 0) 3815 printf("%s:%d: bogus intr (reg 0x%x)\n", 3816 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 3817 else 3818 rv = 1; 3819 } 3820 } 3821 return rv; 3822 } 3823 3824 int 3825 pdc20265_pci_intr(arg) 3826 void *arg; 3827 { 3828 struct pciide_softc *sc = arg; 3829 struct pciide_channel *cp; 3830 struct channel_softc *wdc_cp; 3831 int i, rv, crv; 3832 u_int32_t dmastat; 3833 3834 rv = 0; 3835 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3836 cp = &sc->pciide_channels[i]; 3837 wdc_cp = &cp->wdc_channel; 3838 /* If a compat channel skip. */ 3839 if (cp->compat) 3840 continue; 3841 /* 3842 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously, 3843 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 3844 * So use it instead (requires 2 reg reads instead of 1, 3845 * but we can't do it another way). 3846 */ 3847 dmastat = bus_space_read_1(sc->sc_dma_iot, 3848 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 3849 if((dmastat & IDEDMA_CTL_INTR) == 0) 3850 continue; 3851 crv = wdcintr(wdc_cp); 3852 if (crv == 0) 3853 printf("%s:%d: bogus intr\n", 3854 sc->sc_wdcdev.sc_dev.dv_xname, i); 3855 else 3856 rv = 1; 3857 } 3858 return rv; 3859 } 3860 3861 void 3862 opti_chip_map(sc, pa) 3863 struct pciide_softc *sc; 3864 struct pci_attach_args *pa; 3865 { 3866 struct pciide_channel *cp; 3867 bus_size_t cmdsize, ctlsize; 3868 pcireg_t interface; 3869 u_int8_t init_ctrl; 3870 int channel; 3871 3872 if (pciide_chipen(sc, pa) == 0) 3873 return; 3874 printf("%s: bus-master DMA support present", 3875 sc->sc_wdcdev.sc_dev.dv_xname); 3876 3877 /* 3878 * XXXSCW: 3879 * There seem to be a couple of buggy revisions/implementations 3880 * of the OPTi pciide chipset. This kludge seems to fix one of 3881 * the reported problems (PR/11644) but still fails for the 3882 * other (PR/13151), although the latter may be due to other 3883 * issues too... 3884 */ 3885 if (PCI_REVISION(pa->pa_class) <= 0x12) { 3886 printf(" but disabled due to chip rev.
<= 0x12"); 3887 sc->sc_dma_ok = 0; 3888 } else 3889 pciide_mapreg_dma(sc, pa); 3890 3891 printf("\n"); 3892 3893 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 | 3894 WDC_CAPABILITY_MODE; 3895 sc->sc_wdcdev.PIO_cap = 4; 3896 if (sc->sc_dma_ok) { 3897 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3898 sc->sc_wdcdev.irqack = pciide_irqack; 3899 sc->sc_wdcdev.DMA_cap = 2; 3900 } 3901 sc->sc_wdcdev.set_modes = opti_setup_channel; 3902 3903 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3904 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3905 3906 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 3907 OPTI_REG_INIT_CONTROL); 3908 3909 interface = PCI_INTERFACE(pa->pa_class); 3910 3911 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3912 cp = &sc->pciide_channels[channel]; 3913 if (pciide_chansetup(sc, channel, interface) == 0) 3914 continue; 3915 if (channel == 1 && 3916 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 3917 printf("%s: %s channel ignored (disabled)\n", 3918 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3919 continue; 3920 } 3921 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3922 pciide_pci_intr); 3923 if (cp->hw_ok == 0) 3924 continue; 3925 pciide_map_compat_intr(pa, cp, channel, interface); 3926 if (cp->hw_ok == 0) 3927 continue; 3928 opti_setup_channel(&cp->wdc_channel); 3929 } 3930 } 3931 3932 void 3933 opti_setup_channel(chp) 3934 struct channel_softc *chp; 3935 { 3936 struct ata_drive_datas *drvp; 3937 struct pciide_channel *cp = (struct pciide_channel*)chp; 3938 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3939 int drive, spd; 3940 int mode[2]; 3941 u_int8_t rv, mr; 3942 3943 /* 3944 * The `Delay' and `Address Setup Time' fields of the 3945 * Miscellaneous Register are always zero initially. 3946 */ 3947 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 3948 mr &= ~(OPTI_MISC_DELAY_MASK | 3949 OPTI_MISC_ADDR_SETUP_MASK | 3950 OPTI_MISC_INDEX_MASK); 3951 3952 /* Prime the control register before setting timing values */ 3953 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 3954 3955 /* Determine the clockrate of the PCIbus the chip is attached to */ 3956 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 3957 spd &= OPTI_STRAP_PCI_SPEED_MASK; 3958 3959 /* setup DMA if needed */ 3960 pciide_channel_dma_setup(cp); 3961 3962 for (drive = 0; drive < 2; drive++) { 3963 drvp = &chp->ch_drive[drive]; 3964 /* If no drive, skip */ 3965 if ((drvp->drive_flags & DRIVE) == 0) { 3966 mode[drive] = -1; 3967 continue; 3968 } 3969 3970 if ((drvp->drive_flags & DRIVE_DMA)) { 3971 /* 3972 * Timings will be used for both PIO and DMA, 3973 * so adjust DMA mode if needed 3974 */ 3975 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3976 drvp->PIO_mode = drvp->DMA_mode + 2; 3977 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3978 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 3979 drvp->PIO_mode - 2 : 0; 3980 if (drvp->DMA_mode == 0) 3981 drvp->PIO_mode = 0; 3982 3983 mode[drive] = drvp->DMA_mode + 5; 3984 } else 3985 mode[drive] = drvp->PIO_mode; 3986 3987 if (drive && mode[0] >= 0 && 3988 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 3989 /* 3990 * Can't have two drives using different values 3991 * for `Address Setup Time'. 3992 * Slow down the faster drive to compensate. 3993 */ 3994 int d = (opti_tim_as[spd][mode[0]] > 3995 opti_tim_as[spd][mode[1]]) ? 
0 : 1; 3996 3997 mode[d] = mode[1-d]; 3998 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 3999 chp->ch_drive[d].DMA_mode = 0; 4000 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA; 4001 } 4002 } 4003 4004 for (drive = 0; drive < 2; drive++) { 4005 int m; 4006 if ((m = mode[drive]) < 0) 4007 continue; 4008 4009 /* Set the Address Setup Time and select appropriate index */ 4010 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 4011 rv |= OPTI_MISC_INDEX(drive); 4012 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 4013 4014 /* Set the pulse width and recovery timing parameters */ 4015 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 4016 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 4017 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 4018 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 4019 4020 /* Set the Enhanced Mode register appropriately */ 4021 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 4022 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 4023 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 4024 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 4025 } 4026 4027 /* Finally, enable the timings */ 4028 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 4029 4030 pciide_print_modes(cp); 4031 } 4032 4033 #define ACARD_IS_850(sc) \ 4034 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 4035 4036 void 4037 acard_chip_map(sc, pa) 4038 struct pciide_softc *sc; 4039 struct pci_attach_args *pa; 4040 { 4041 struct pciide_channel *cp; 4042 int i; 4043 pcireg_t interface; 4044 bus_size_t cmdsize, ctlsize; 4045 4046 if (pciide_chipen(sc, pa) == 0) 4047 return; 4048 4049 /* 4050 * when the chip is in native mode it identifies itself as a 4051 * 'misc mass storage'. Fake interface in this case. 4052 */ 4053 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4054 interface = PCI_INTERFACE(pa->pa_class); 4055 } else { 4056 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4057 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4058 } 4059 4060 printf("%s: bus-master DMA support present", 4061 sc->sc_wdcdev.sc_dev.dv_xname); 4062 pciide_mapreg_dma(sc, pa); 4063 printf("\n"); 4064 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4065 WDC_CAPABILITY_MODE; 4066 4067 if (sc->sc_dma_ok) { 4068 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4069 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4070 sc->sc_wdcdev.irqack = pciide_irqack; 4071 } 4072 sc->sc_wdcdev.PIO_cap = 4; 4073 sc->sc_wdcdev.DMA_cap = 2; 4074 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 
2 : 4; 4075 4076 sc->sc_wdcdev.set_modes = acard_setup_channel; 4077 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4078 sc->sc_wdcdev.nchannels = 2; 4079 4080 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4081 cp = &sc->pciide_channels[i]; 4082 if (pciide_chansetup(sc, i, interface) == 0) 4083 continue; 4084 if (interface & PCIIDE_INTERFACE_PCI(i)) { 4085 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 4086 &ctlsize, pciide_pci_intr); 4087 } else { 4088 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 4089 &cmdsize, &ctlsize); 4090 } 4091 if (cp->hw_ok == 0) 4092 return; 4093 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4094 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4095 wdcattach(&cp->wdc_channel); 4096 acard_setup_channel(&cp->wdc_channel); 4097 } 4098 if (!ACARD_IS_850(sc)) { 4099 u_int32_t reg; 4100 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 4101 reg &= ~ATP860_CTRL_INT; 4102 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 4103 } 4104 } 4105 4106 void 4107 acard_setup_channel(chp) 4108 struct channel_softc *chp; 4109 { 4110 struct ata_drive_datas *drvp; 4111 struct pciide_channel *cp = (struct pciide_channel*)chp; 4112 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4113 int channel = chp->channel; 4114 int drive; 4115 u_int32_t idetime, udma_mode; 4116 u_int32_t idedma_ctl; 4117 4118 /* setup DMA if needed */ 4119 pciide_channel_dma_setup(cp); 4120 4121 if (ACARD_IS_850(sc)) { 4122 idetime = 0; 4123 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 4124 udma_mode &= ~ATP850_UDMA_MASK(channel); 4125 } else { 4126 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 4127 idetime &= ~ATP860_SETTIME_MASK(channel); 4128 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 4129 udma_mode &= ~ATP860_UDMA_MASK(channel); 4130 4131 /* check 80 pins cable */ 4132 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) || 4133 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 4134 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 4135 & ATP860_CTRL_80P(chp->channel)) { 4136 if (chp->ch_drive[0].UDMA_mode > 2) 4137 chp->ch_drive[0].UDMA_mode = 2; 4138 if (chp->ch_drive[1].UDMA_mode > 2) 4139 chp->ch_drive[1].UDMA_mode = 2; 4140 } 4141 } 4142 } 4143 4144 idedma_ctl = 0; 4145 4146 /* Per drive settings */ 4147 for (drive = 0; drive < 2; drive++) { 4148 drvp = &chp->ch_drive[drive]; 4149 /* If no drive, skip */ 4150 if ((drvp->drive_flags & DRIVE) == 0) 4151 continue; 4152 /* add timing values, setup DMA if needed */ 4153 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 4154 (drvp->drive_flags & DRIVE_UDMA)) { 4155 /* use Ultra/DMA */ 4156 if (ACARD_IS_850(sc)) { 4157 idetime |= ATP850_SETTIME(drive, 4158 acard_act_udma[drvp->UDMA_mode], 4159 acard_rec_udma[drvp->UDMA_mode]); 4160 udma_mode |= ATP850_UDMA_MODE(channel, drive, 4161 acard_udma_conf[drvp->UDMA_mode]); 4162 } else { 4163 idetime |= ATP860_SETTIME(channel, drive, 4164 acard_act_udma[drvp->UDMA_mode], 4165 acard_rec_udma[drvp->UDMA_mode]); 4166 udma_mode |= ATP860_UDMA_MODE(channel, drive, 4167 acard_udma_conf[drvp->UDMA_mode]); 4168 } 4169 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4170 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 4171 (drvp->drive_flags & DRIVE_DMA)) { 4172 /* use Multiword DMA */ 4173 drvp->drive_flags &= ~DRIVE_UDMA; 4174 if (ACARD_IS_850(sc)) { 4175 idetime |= ATP850_SETTIME(drive, 4176 acard_act_dma[drvp->DMA_mode], 4177 acard_rec_dma[drvp->DMA_mode]); 4178 } else { 4179 idetime |= ATP860_SETTIME(channel, drive, 4180 
acard_act_dma[drvp->DMA_mode], 4181 acard_rec_dma[drvp->DMA_mode]); 4182 } 4183 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4184 } else { 4185 /* PIO only */ 4186 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 4187 if (ACARD_IS_850(sc)) { 4188 idetime |= ATP850_SETTIME(drive, 4189 acard_act_pio[drvp->PIO_mode], 4190 acard_rec_pio[drvp->PIO_mode]); 4191 } else { 4192 idetime |= ATP860_SETTIME(channel, drive, 4193 acard_act_pio[drvp->PIO_mode], 4194 acard_rec_pio[drvp->PIO_mode]); 4195 } 4196 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 4197 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 4198 | ATP8x0_CTRL_EN(channel)); 4199 } 4200 } 4201 4202 if (idedma_ctl != 0) { 4203 /* Add software bits in status register */ 4204 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4205 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl); 4206 } 4207 pciide_print_modes(cp); 4208 4209 if (ACARD_IS_850(sc)) { 4210 pci_conf_write(sc->sc_pc, sc->sc_tag, 4211 ATP850_IDETIME(channel), idetime); 4212 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 4213 } else { 4214 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 4215 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 4216 } 4217 } 4218 4219 int 4220 acard_pci_intr(arg) 4221 void *arg; 4222 { 4223 struct pciide_softc *sc = arg; 4224 struct pciide_channel *cp; 4225 struct channel_softc *wdc_cp; 4226 int rv = 0; 4227 int dmastat, i, crv; 4228 4229 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4230 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4231 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 4232 if ((dmastat & IDEDMA_CTL_INTR) == 0) 4233 continue; 4234 cp = &sc->pciide_channels[i]; 4235 wdc_cp = &cp->wdc_channel; 4236 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) { 4237 (void)wdcintr(wdc_cp); 4238 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4239 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 4240 continue; 4241 } 4242 crv = wdcintr(wdc_cp); 4243 if (crv == 0) 4244 printf("%s:%d: bogus intr\n", 4245 sc->sc_wdcdev.sc_dev.dv_xname, i); 4246 else if (crv == 1) 4247 rv = 1; 4248 else if (rv == 0) 4249 rv = crv; 4250 } 4251 return rv; 4252 } 4253 4254 static int 4255 sl82c105_bugchk(struct pci_attach_args *pa) 4256 { 4257 4258 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND || 4259 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0) 4260 return (0); 4261 4262 if (PCI_REVISION(pa->pa_class) <= 0x05) 4263 return (1); 4264 4265 return (0); 4266 } 4267 4268 void 4269 sl82c105_chip_map(sc, pa) 4270 struct pciide_softc *sc; 4271 struct pci_attach_args *pa; 4272 { 4273 struct pciide_channel *cp; 4274 bus_size_t cmdsize, ctlsize; 4275 pcireg_t interface, idecr; 4276 int channel; 4277 4278 if (pciide_chipen(sc, pa) == 0) 4279 return; 4280 4281 printf("%s: bus-master DMA support present", 4282 sc->sc_wdcdev.sc_dev.dv_xname); 4283 4284 /* 4285 * Check to see if we're part of the Winbond 83c553 Southbridge. 4286 * If so, we need to disable DMA on rev. <= 5 of that chip. 4287 */ 4288 if (pci_find_device(pa, sl82c105_bugchk)) { 4289 printf(" but disabled due to 83c553 rev. 
<= 0x05"); 4290 sc->sc_dma_ok = 0; 4291 } else 4292 pciide_mapreg_dma(sc, pa); 4293 printf("\n"); 4294 4295 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 | 4296 WDC_CAPABILITY_MODE; 4297 sc->sc_wdcdev.PIO_cap = 4; 4298 if (sc->sc_dma_ok) { 4299 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4300 sc->sc_wdcdev.irqack = pciide_irqack; 4301 sc->sc_wdcdev.DMA_cap = 2; 4302 } 4303 sc->sc_wdcdev.set_modes = sl82c105_setup_channel; 4304 4305 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4306 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4307 4308 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR); 4309 4310 interface = PCI_INTERFACE(pa->pa_class); 4311 4312 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4313 cp = &sc->pciide_channels[channel]; 4314 if (pciide_chansetup(sc, channel, interface) == 0) 4315 continue; 4316 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) || 4317 (channel == 1 && (idecr & IDECR_P1EN) == 0)) { 4318 printf("%s: %s channel ignored (disabled)\n", 4319 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4320 continue; 4321 } 4322 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4323 pciide_pci_intr); 4324 if (cp->hw_ok == 0) 4325 continue; 4326 pciide_map_compat_intr(pa, cp, channel, interface); 4327 if (cp->hw_ok == 0) 4328 continue; 4329 sl82c105_setup_channel(&cp->wdc_channel); 4330 } 4331 } 4332 4333 void 4334 sl82c105_setup_channel(chp) 4335 struct channel_softc *chp; 4336 { 4337 struct ata_drive_datas *drvp; 4338 struct pciide_channel *cp = (struct pciide_channel*)chp; 4339 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4340 int pxdx_reg, drive; 4341 pcireg_t pxdx; 4342 4343 /* Set up DMA if needed. */ 4344 pciide_channel_dma_setup(cp); 4345 4346 for (drive = 0; drive < 2; drive++) { 4347 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR 4348 : SYMPH_P1D0CR) + (drive * 4); 4349 4350 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg); 4351 4352 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK); 4353 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN); 4354 4355 drvp = &chp->ch_drive[drive]; 4356 /* If no drive, skip. */ 4357 if ((drvp->drive_flags & DRIVE) == 0) { 4358 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx); 4359 continue; 4360 } 4361 4362 if (drvp->drive_flags & DRIVE_DMA) { 4363 /* 4364 * Timings will be used for both PIO and DMA, 4365 * so adjust DMA mode if needed. 4366 */ 4367 if (drvp->PIO_mode >= 3) { 4368 if ((drvp->DMA_mode + 2) > drvp->PIO_mode) 4369 drvp->DMA_mode = drvp->PIO_mode - 2; 4370 if (drvp->DMA_mode < 1) { 4371 /* 4372 * Can't mix both PIO and DMA. 4373 * Disable DMA. 4374 */ 4375 drvp->drive_flags &= ~DRIVE_DMA; 4376 } 4377 } else { 4378 /* 4379 * Can't mix both PIO and DMA. Disable 4380 * DMA. 4381 */ 4382 drvp->drive_flags &= ~DRIVE_DMA; 4383 } 4384 } 4385 4386 if (drvp->drive_flags & DRIVE_DMA) { 4387 /* Use multi-word DMA. */ 4388 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on << 4389 PxDx_CMD_ON_SHIFT; 4390 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off; 4391 } else { 4392 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on << 4393 PxDx_CMD_ON_SHIFT; 4394 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off; 4395 } 4396 4397 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */ 4398 4399 /* ...and set the mode for this drive. 
*/ 4400 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx); 4401 } 4402 4403 pciide_print_modes(cp); 4404 } 4405 4406 void 4407 serverworks_chip_map(sc, pa) 4408 struct pciide_softc *sc; 4409 struct pci_attach_args *pa; 4410 { 4411 struct pciide_channel *cp; 4412 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4413 pcitag_t pcib_tag; 4414 int channel; 4415 bus_size_t cmdsize, ctlsize; 4416 4417 if (pciide_chipen(sc, pa) == 0) 4418 return; 4419 4420 printf("%s: bus-master DMA support present", 4421 sc->sc_wdcdev.sc_dev.dv_xname); 4422 pciide_mapreg_dma(sc, pa); 4423 printf("\n"); 4424 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4425 WDC_CAPABILITY_MODE; 4426 4427 if (sc->sc_dma_ok) { 4428 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4429 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4430 sc->sc_wdcdev.irqack = pciide_irqack; 4431 } 4432 sc->sc_wdcdev.PIO_cap = 4; 4433 sc->sc_wdcdev.DMA_cap = 2; 4434 switch (sc->sc_pp->ide_product) { 4435 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE: 4436 sc->sc_wdcdev.UDMA_cap = 2; 4437 break; 4438 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE: 4439 if (PCI_REVISION(pa->pa_class) < 0x92) 4440 sc->sc_wdcdev.UDMA_cap = 4; 4441 else 4442 sc->sc_wdcdev.UDMA_cap = 5; 4443 break; 4444 } 4445 4446 sc->sc_wdcdev.set_modes = serverworks_setup_channel; 4447 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4448 sc->sc_wdcdev.nchannels = 2; 4449 4450 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4451 cp = &sc->pciide_channels[channel]; 4452 if (pciide_chansetup(sc, channel, interface) == 0) 4453 continue; 4454 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4455 serverworks_pci_intr); 4456 if (cp->hw_ok == 0) 4457 return; 4458 pciide_map_compat_intr(pa, cp, channel, interface); 4459 if (cp->hw_ok == 0) 4460 return; 4461 serverworks_setup_channel(&cp->wdc_channel); 4462 } 4463 4464 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 4465 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 4466 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 4467 } 4468 4469 void 4470 serverworks_setup_channel(chp) 4471 struct channel_softc *chp; 4472 { 4473 struct ata_drive_datas *drvp; 4474 struct pciide_channel *cp = (struct pciide_channel*)chp; 4475 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4476 int channel = chp->channel; 4477 int drive, unit; 4478 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 4479 u_int32_t idedma_ctl; 4480 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 4481 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 4482 4483 /* setup DMA if needed */ 4484 pciide_channel_dma_setup(cp); 4485 4486 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 4487 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 4488 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 4489 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 4490 4491 pio_time &= ~(0xffff << (16 * channel)); 4492 dma_time &= ~(0xffff << (16 * channel)); 4493 pio_mode &= ~(0xff << (8 * channel + 16)); 4494 udma_mode &= ~(0xff << (8 * channel + 16)); 4495 udma_mode &= ~(3 << (2 * channel)); 4496 4497 idedma_ctl = 0; 4498 4499 /* Per drive settings */ 4500 for (drive = 0; drive < 2; drive++) { 4501 drvp = &chp->ch_drive[drive]; 4502 /* If no drive, skip */ 4503 if ((drvp->drive_flags & DRIVE) == 0) 4504 continue; 4505 unit = drive + 2 * channel; 4506 /* add timing values, setup DMA if needed */ 4507 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 4508 pio_mode |= 
drvp->PIO_mode << (4 * unit + 16); 4509 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 4510 (drvp->drive_flags & DRIVE_UDMA)) { 4511 /* use Ultra/DMA, check for 80-pin cable */ 4512 if (drvp->UDMA_mode > 2 && 4513 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0) 4514 drvp->UDMA_mode = 2; 4515 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 4516 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 4517 udma_mode |= 1 << unit; 4518 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4519 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 4520 (drvp->drive_flags & DRIVE_DMA)) { 4521 /* use Multiword DMA */ 4522 drvp->drive_flags &= ~DRIVE_UDMA; 4523 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 4524 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4525 } else { 4526 /* PIO only */ 4527 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 4528 } 4529 } 4530 4531 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 4532 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 4533 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE) 4534 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 4535 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 4536 4537 if (idedma_ctl != 0) { 4538 /* Add software bits in status register */ 4539 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4540 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl); 4541 } 4542 pciide_print_modes(cp); 4543 } 4544 4545 int 4546 serverworks_pci_intr(arg) 4547 void *arg; 4548 { 4549 struct pciide_softc *sc = arg; 4550 struct pciide_channel *cp; 4551 struct channel_softc *wdc_cp; 4552 int rv = 0; 4553 int dmastat, i, crv; 4554 4555 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4556 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4557 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 4558 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 4559 IDEDMA_CTL_INTR) 4560 continue; 4561 cp = &sc->pciide_channels[i]; 4562 wdc_cp = &cp->wdc_channel; 4563 crv = wdcintr(wdc_cp); 4564 if (crv == 0) { 4565 printf("%s:%d: bogus intr\n", 4566 sc->sc_wdcdev.sc_dev.dv_xname, i); 4567 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4568 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 4569 } else 4570 rv = 1; 4571 } 4572 return rv; 4573 } 4574
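/*
 * Standalone example (not part of the driver above): the shared-interrupt
 * handlers hpt_pci_intr, pdc20265_pci_intr, serverworks_pci_intr and
 * acard_pci_intr all follow the same shape: for each channel, read the
 * bus-master status byte, skip the channel unless the interrupt bit is set
 * (some chips also require the "active" bit to be clear), hand the event to
 * wdcintr(), and acknowledge a stray interrupt by writing the status back.
 * Everything below (fake_dmastat, fake_wdcintr, the EX_* bit values) is
 * invented for this sketch; the real code uses bus_space reads of
 * IDEDMA_CTL.
 */
#include <stdio.h>

#define EX_CTL_INTR	0x04	/* interrupt pending (example value only) */
#define EX_CTL_ACT	0x01	/* DMA engine active (example value only) */
#define EX_NCHANNELS	2

static unsigned char fake_dmastat[EX_NCHANNELS] = { EX_CTL_INTR, 0x00 };

static int
fake_wdcintr(int channel)
{
	/* Pretend only channel 0 had a transfer outstanding. */
	return (channel == 0);
}

int
example_pci_intr(void)
{
	unsigned char dmastat;
	int i, rv = 0;

	for (i = 0; i < EX_NCHANNELS; i++) {
		dmastat = fake_dmastat[i];
		/* Interrupt must be pending and the engine idle. */
		if ((dmastat & (EX_CTL_ACT | EX_CTL_INTR)) != EX_CTL_INTR)
			continue;
		if (fake_wdcintr(i) == 0) {
			printf("channel %d: bogus intr\n", i);
			/* Stand-in for writing the status byte back to ack. */
			fake_dmastat[i] &= ~EX_CTL_INTR;
		} else
			rv = 1;
	}
	return rv;
}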
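/*
 * Standalone example (not part of the driver above): serverworks_setup_channel
 * packs one timing byte per drive into a 32-bit configuration word, indexed
 * by unit = drive + 2 * channel, with the two bytes of a channel swapped
 * (hence the "unit ^ 1").  The helper below redoes that arithmetic on a
 * plain variable so the masking is easier to follow; ex_pio_modes[] carries
 * the same byte values as the driver's table, but the function itself, and
 * the choice to clear only the drive's own byte (the driver clears a whole
 * channel's 16-bit half up front), are inventions of this sketch.
 */
#include <stdio.h>

static const unsigned char ex_pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};

unsigned int
example_set_pio_timing(unsigned int pio_time, int channel, int drive,
    int pio_mode)
{
	int unit = drive + 2 * channel;

	/* Clear this drive's byte, then install the timing value. */
	pio_time &= ~(0xffu << (8 * (unit ^ 1)));
	pio_time |= (unsigned int)ex_pio_modes[pio_mode] << (8 * (unit ^ 1));
	return pio_time;
}

void
example_pio_timing_usage(void)
{
	unsigned int pio_time = 0xffffffffu;	/* pretend BIOS leftovers */

	/* Channel 1: drive 0 at PIO 4, drive 1 at PIO 2. */
	pio_time = example_set_pio_timing(pio_time, 1, 0, 4);
	pio_time = example_set_pio_timing(pio_time, 1, 1, 2);
	printf("pio_time = 0x%08x\n", pio_time);	/* 0x2034ffff */
}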
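/*
 * Standalone example (not part of the driver above): pdc202xx_setup_channel,
 * pdc20268_setup_channel, acard_setup_channel and serverworks_setup_channel
 * all clamp a drive's Ultra-DMA mode to 2 when no 80-conductor cable is
 * detected, because modes 3 and above need the better cable.  The helper
 * below shows just that rule; its name and the cable80 flag are made up for
 * the example, and each chip detects the cable differently (a state bit, a
 * control register, or a subsystem-ID bit, as seen above).
 */
#include <stdio.h>

int
example_udma_clamp(int udma_mode, int cable80)
{
	/* Without an 80-conductor cable, anything above UDMA 2 is unsafe. */
	if (cable80 == 0 && udma_mode > 2)
		return 2;
	return udma_mode;
}

void
example_udma_clamp_usage(void)
{
	/* A UDMA 5 drive on a 40-conductor cable falls back to UDMA 2. */
	printf("udma 5, 40-wire -> %d\n", example_udma_clamp(5, 0));
	/* The same drive on an 80-conductor cable keeps its mode. */
	printf("udma 5, 80-wire -> %d\n", example_udma_clamp(5, 1));
}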
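/*
 * Standalone example (not part of the driver above): the PDC2xx_TIM_SET_*,
 * ATP850/ATP860 SETTIME/UDMA and OPTI register updates above are all
 * instances of the same read-modify-write idiom on a configuration word:
 * mask out a field, then OR in the new value shifted into place.  The
 * generic helper below is written for this sketch only; the driver spells
 * each field out with chip-specific macros from the pciide_*_reg.h headers.
 */
#include <stdio.h>

unsigned int
example_set_field(unsigned int reg, unsigned int mask, int shift,
    unsigned int val)
{
	reg &= ~(mask << shift);	/* clear the old field */
	reg |= (val & mask) << shift;	/* install the new value */
	return reg;
}

void
example_set_field_usage(void)
{
	unsigned int tim = 0x12345678u;

	/* Replace bits 15..8 with 0xab, leaving the rest untouched. */
	tim = example_set_field(tim, 0xff, 8, 0xab);
	printf("tim = 0x%08x\n", tim);	/* 0x1234ab78 */
}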