/*	$NetBSD: pciide.c,v 1.169 2002/09/15 20:27:09 bouyer Exp $	*/


/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christopher G. Demetriou
 *	for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.169 2002/09/15 20:27:09 bouyer Exp $");

#ifndef WDCDEBUG
#define WDCDEBUG
#endif

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10
#ifdef WDCDEBUG
int wdcdebug_pciide_mask = 0;
#define WDCDEBUG_PRINT(args, level) \
	if (wdcdebug_pciide_mask & (level)) printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/pciide_acard_reg.h>
#include <dev/pci/pciide_sl82c105_reg.h>
#include <dev/pci/cy82c693var.h>

#include "opt_pciide.h"

/* inlines for reading/writing 8-bit PCI registers */
static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
    int));
static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
    int, u_int8_t));

static __inline u_int8_t
pciide_pci_read(pc, pa, reg)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
{

	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

static __inline void
pciide_pci_write(pc, pa, reg, val)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
	u_int8_t val;
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}

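/*
 * For example, pciide_pci_read(pc, tag, 0x41) reads the 32-bit configuration
 * dword at offset 0x40 and returns byte lane 1; pciide_pci_write() does the
 * matching read-modify-write so the other three bytes of the dword are
 * preserved.
 */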
void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));

void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void piix_setup_channel __P((struct channel_softc*));
void piix3_4_setup_channel __P((struct channel_softc*));
static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));

void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void amd7x6_setup_channel __P((struct channel_softc*));

void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void apollo_setup_channel __P((struct channel_softc*));

void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_setup_channel __P((struct channel_softc*));
void cmd_channel_map __P((struct pci_attach_args *,
    struct pciide_softc *, int));
int cmd_pci_intr __P((void *));
void cmd646_9_irqack __P((struct channel_softc *));
void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd680_setup_channel __P((struct channel_softc*));
void cmd680_channel_map __P((struct pci_attach_args *,
    struct pciide_softc *, int));

void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cy693_setup_channel __P((struct channel_softc*));

void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void sis_setup_channel __P((struct channel_softc*));
static int sis_hostbr_match __P((struct pci_attach_args *));

void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void acer_setup_channel __P((struct channel_softc*));
int acer_pci_intr __P((void *));

void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void pdc202xx_setup_channel __P((struct channel_softc*));
void pdc20268_setup_channel __P((struct channel_softc*));
int pdc202xx_pci_intr __P((void *));
int pdc20265_pci_intr __P((void *));

void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void opti_setup_channel __P((struct channel_softc*));

void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void hpt_setup_channel __P((struct channel_softc*));
int hpt_pci_intr __P((void *));

void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void acard_setup_channel __P((struct channel_softc*));
int acard_pci_intr __P((void *));

void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void serverworks_setup_channel __P((struct channel_softc*));
int serverworks_pci_intr __P((void *));

void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void sl82c105_setup_channel __P((struct channel_softc*));

void pciide_channel_dma_setup __P((struct pciide_channel *));
int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
int pciide_dma_init __P((void*, int, int, void *, size_t, int));
void pciide_dma_start __P((void*, int, int));
int pciide_dma_finish __P((void*, int, int, int));
void pciide_irqack __P((struct channel_softc *));
void pciide_print_modes __P((struct pciide_channel *));

struct pciide_product_desc {
	u_int32_t ide_product;
	int ide_flags;
	const char *ide_name;
	/* map and setup chip, probe drives */
	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
#define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARS ignore upper word */

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,
	0,
	"Generic PCI IDE controller",
	default_chip_map,
};

const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_82092AA,
	  0,
	  "Intel 82092AA IDE controller",
	  default_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371FB_IDE,
	  0,
	  "Intel 82371FB IDE controller (PIIX)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371SB_IDE,
	  0,
	  "Intel 82371SB IDE Interface (PIIX3)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371AB_IDE,
	  0,
	  "Intel 82371AB IDE controller (PIIX4)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82440MX_IDE,
	  0,
	  "Intel 82440MX IDE controller",
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801AA_IDE,
	  0,
	  "Intel 82801AA IDE Controller (ICH)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801AB_IDE,
	  0,
	  "Intel 82801AB IDE Controller (ICH0)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801BA_IDE,
	  0,
	  "Intel 82801BA IDE Controller (ICH2)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801BAM_IDE,
	  0,
	  "Intel 82801BAM IDE Controller (ICH2)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801CA_IDE_1,
	  0,
	  "Intel 82801CA IDE Controller",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801CA_IDE_2,
	  0,
	  "Intel 82801CA IDE Controller",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801DB_IDE,
	  0,
	  "Intel 82801DB IDE Controller (ICH4)",
	  piix_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,
	  0,
	  "Advanced Micro Devices AMD756 IDE Controller",
	  amd7x6_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC766_IDE,
	  0,
	  "Advanced Micro Devices AMD766 IDE Controller",
	  amd7x6_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC768_IDE,
	  0,
	  "Advanced Micro Devices AMD768 IDE Controller",
	  amd7x6_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC8111_IDE,
	  0,
	  "Advanced Micro Devices AMD8111 IDE Controller",
	  amd7x6_chip_map
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  0,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  0,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  0,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_680,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Silicon Image 0680",
	  cmd680_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
	  0,
	  NULL,
	  apollo_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
	  0,
	  NULL,
	  apollo_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693,
	  IDE_16BIT_IOSPACE,
	  "Cypress 82C693 IDE Controller",
	  cy693_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5597_IDE,
	  0,
	  "Silicon Integrated System 5597/5598 IDE controller",
	  sis_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,
	  0,
	  "Acer Labs M5229 UDMA IDE Controller",
	  acer_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_ULTRA33,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA66,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100X,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100TX2,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA133,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra133/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA133TX2,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621,
	  0,
	  "OPTi 82c621 PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82C568,
	  0,
	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82D568,
	  0,
	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366,
	  IDE_PCI_CLASS_OVERRIDE,
	  NULL,
	  hpt_chip_map,
	},
	{ PCI_PRODUCT_TRIONES_HPT372,
	  IDE_PCI_CLASS_OVERRIDE,
	  NULL,
	  hpt_chip_map
	},
	{ PCI_PRODUCT_TRIONES_HPT374,
	  IDE_PCI_CLASS_OVERRIDE,
	  NULL,
	  hpt_chip_map
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_acard_products[] = {
	{ PCI_PRODUCT_ACARD_ATP850U,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Acard ATP850U Ultra33 IDE Controller",
	  acard_chip_map,
	},
	{ PCI_PRODUCT_ACARD_ATP860,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Acard ATP860 Ultra66 IDE Controller",
	  acard_chip_map,
	},
	{ PCI_PRODUCT_ACARD_ATP860A,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Acard ATP860-A Ultra66 IDE Controller",
	  acard_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_serverworks_products[] = {
	{ PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
	  0,
	  "ServerWorks OSB4 IDE Controller",
	  serverworks_chip_map,
	},
	{ PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
	  0,
	  "ServerWorks CSB5 IDE Controller",
	  serverworks_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_symphony_products[] = {
	{ PCI_PRODUCT_SYMPHONY_82C105,
	  0,
	  "Symphony Labs 82C105 IDE controller",
	  sl82c105_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_winbond_products[] = {
	{ PCI_PRODUCT_WINBOND_W83C553F_1,
	  0,
	  "Winbond W83C553F IDE controller",
	  sl82c105_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products },
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
	{ PCI_VENDOR_VIATECH, pciide_via_products },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
	{ PCI_VENDOR_SIS, pciide_sis_products },
	{ PCI_VENDOR_ALI, pciide_acer_products },
	{ PCI_VENDOR_PROMISE, pciide_promise_products },
	{ PCI_VENDOR_AMD, pciide_amd_products },
	{ PCI_VENDOR_OPTI, pciide_opti_products },
	{ PCI_VENDOR_TRIONES, pciide_triones_products },
	{ PCI_VENDOR_ACARD, pciide_acard_products },
	{ PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
	{ PCI_VENDOR_SYMPHONY, pciide_symphony_products },
	{ PCI_VENDOR_WINBOND, pciide_winbond_products },
	{ 0, NULL }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01
#define PCIIDE_OPTIONS_NODMA	0x02

int	pciide_match __P((struct device *, struct cfdata *, void *));
void	pciide_attach __P((struct device *, struct device *, void *));

struct cfattach pciide_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};
int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
int	pciide_mapregs_compat __P(( struct pci_attach_args *,
	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
int	pciide_mapregs_native __P((struct pci_attach_args *,
	    struct pciide_channel *, bus_size_t *, bus_size_t *,
	    int (*pci_intr) __P((void *))));
void	pciide_mapreg_dma __P((struct pciide_softc *,
	    struct pci_attach_args *));
int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
void	pciide_mapchan __P((struct pci_attach_args *,
	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
	    int (*pci_intr) __P((void *))));
int	pciide_chan_candisable __P((struct pciide_channel *));
void	pciide_map_compat_intr __P(( struct pci_attach_args *,
	    struct pciide_channel *, int, int));
int	pciide_compat_intr __P((void *));
int	pciide_pci_intr __P((void *));
const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));

const struct pciide_product_desc *
pciide_lookup_product(id)
	u_int32_t id;
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;

	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if ((pp = vp->ide_products) == NULL)
		return NULL;

	for (; pp->chip_map != NULL; pp++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (pp->chip_map == NULL)
		return NULL;
	return pp;
}

int
pciide_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		return (1);
	}

	/*
	 * Some controllers (e.g. the Promise Ultra33) don't claim to be
	 * PCI IDE controllers. Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
		return (1);
	}

	return (0);
}

void
pciide_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	struct pciide_softc *sc = (struct pciide_softc *)self;
	pcireg_t csr;
	char devinfo[256];
	const char *displaydev;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL) {
		sc->sc_pp = &default_product_desc;
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
		displaydev = devinfo;
	} else
		displaydev = sc->sc_pp->ide_name;

	/* if displaydev == NULL, printf is done in chip-specific map */
	if (displaydev)
		printf(": %s (rev. 0x%02x)\n", displaydev,
		    PCI_REVISION(pa->pa_class));

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
#ifdef WDCDEBUG
	if (wdcdebug_pciide_mask & DEBUG_PROBE)
		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
	sc->sc_pp->chip_map(sc, pa);

	if (sc->sc_dma_ok) {
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	}
	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}

/* tell whether the chip is enabled or not */
int
pciide_chipen(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t csr;
	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		printf("%s: device disabled (at %s)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
			"device" : "bridge");
		return 0;
	}
	return 1;
}

int
pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan;
	bus_size_t *cmdsizep, *ctlsizep;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	wdc_cp->cmd_iot = pa->pa_iot;
	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s channel cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;
	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}

	return (1);
}

int
pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args * pa;
	struct pciide_channel *cp;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr) __P((void *));
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;

	cp->compat = 0;

	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return 0;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc);
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return 0;
		}
	}
	cp->ih = sc->sc_pci_ih;
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    PCI_MAPREG_TYPE_IO, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
		printf("%s: couldn't map %s channel cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}

	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    PCI_MAPREG_TYPE_IO, 0,
	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
		printf("%s: couldn't map %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return 0;
	}
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register, the control register is at offset 2. Pass the generic
	 * code a handle for only one byte at the right offset.
	 */
	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdc_cp->ctl_ioh) != 0) {
		printf("%s: unable to subregion %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
		return 0;
	}
	return (1);
}

void
pciide_mapreg_dma(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done.  If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", but unused (registers at unsafe address "
			    "%#lx)", (unsigned long)addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}

		if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
		    PCIIDE_OPTIONS_NODMA) {
			printf(", but unused (forced off by config file)");
			sc->sc_dma_ok = 0;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", but unsupported register maptype (0x%x)", maptype);
	}
}

int
pciide_compat_intr(arg)
	void *arg;
{
	struct pciide_channel *cp = arg;

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p\n", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}

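/*
 * Native-PCI interrupt handler, shared by all channels of a controller
 * (sc_pci_ih is established once in pciide_mapregs_native()).  Compat
 * channels have their own vector, so they are skipped here; for the others
 * wdcintr() is only called when the channel is actually waiting for an
 * interrupt, and it decides whether to claim the event.
 */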
int
pciide_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel skip. */
		if (cp->compat)
			continue;
		/* if this channel not waiting for intr, skip */
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

void
pciide_channel_dma_setup(cp)
	struct pciide_channel *cp;
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

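/*
 * Per-drive DMA state: one physical region descriptor (PRD) table plus a
 * DMA map for the table itself and one for the data transfer.  Per the
 * Bus Master IDE programming interface cited at the top of this file, each
 * PRD describes a region that must not cross a 64KB boundary, and the table
 * is allocated so that it does not cross one either (IDEDMA_TBL_ALIGN); the
 * last entry of the table carries the end-of-table bit
 * (IDEDMA_BYTE_COUNT_EOT), set in pciide_dma_init().
 */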
int
pciide_dma_table_setup(sc, channel, drive)
	struct pciide_softc *sc;
	int channel, drive;
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return 0;

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
	    (unsigned long)seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		printf("%s:%d: unable to create table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to load table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
	    DEBUG_PROBE);
	/* Create a xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		printf("%s:%d: unable to create xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	return 0;
}

int
pciide_dma_init(v, channel, drive, databuf, datalen, flags)
	void *v;
	int channel, drive;
	void *databuf;
	size_t datalen;
	int flags;
{
	struct pciide_softc *sc = v;
	int error, seg;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (error) {
		printf("%s:%d: unable to load xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Maps are ready. Start DMA function */
#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif

	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0);
	/* remember flags */
	dma_maps->dma_flags = flags;
	return 0;
}

void
pciide_dma_start(v, channel, drive)
	void *v;
	int channel, drive;
{
	struct pciide_softc *sc = v;

	WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
}

int
pciide_dma_finish(v, channel, drive, force)
	void *v;
	int channel, drive;
	int force;
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	if ((status & IDEDMA_CTL_ERR) != 0) {
		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
		error |= WDC_DMAST_ERR;
	}

	if ((status & IDEDMA_CTL_INTR) == 0) {
		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}

void
pciide_irqack(chp)
	struct channel_softc *chp;
{
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/* clear status bits in IDE DMA registers */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
}

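/*
 * The chip-specific chip_map routines below follow a common pattern built
 * from these helpers: check pciide_chipen(), map the bus-master registers
 * with pciide_mapreg_dma(), then for each channel call pciide_chansetup(),
 * pciide_mapchan(), possibly pciide_chan_candisable() and
 * pciide_map_compat_intr(), and finally program the timings through
 * set_modes (see piix_chip_map() and amd7x6_chip_map() for examples).
 */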
/* some common code used by several chip_map */
int
pciide_chansetup(sc, channel, interface)
	struct pciide_softc *sc;
	int channel;
	pcireg_t interface;
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.channel = channel;
	cp->wdc_channel.wdc = &sc->sc_wdcdev;
	cp->wdc_channel.ch_queue =
	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		printf("%s %s channel: "
		    "can't allocate memory for command queue\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}
	printf("%s: %s channel %s to %s mode\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");
	return 1;
}

/* some common code used by several chip channel_map */
void
pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	pcireg_t interface;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr) __P((void *));
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
		    pci_intr);
	else
		cp->hw_ok = pciide_mapregs_compat(pa, cp,
		    wdc_cp->channel, cmdsizep, ctlsizep);

	if (cp->hw_ok == 0)
		return;
	wdc_cp->data32iot = wdc_cp->cmd_iot;
	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
	wdcattach(wdc_cp);
}

/*
 * Generic code to call to know if a channel can be disabled. Return 1
 * if channel can be disabled, 0 if not
 */
int
pciide_chan_candisable(cp)
	struct pciide_channel *cp;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
		printf("%s: disabling %s channel (no drives)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
		return 1;
	}
	return 0;
}

/*
 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
 * Set hw_ok=0 on failure
 */
void
pciide_map_compat_intr(pa, cp, compatchan, interface)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan, interface;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (cp->hw_ok == 0)
		return;
	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
		return;

#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
	    pa, compatchan, pciide_compat_intr, cp);
	if (cp->ih == NULL) {
#endif
		printf("%s: no compatibility interrupt for use by %s "
		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	}
#endif
}

void
pciide_print_modes(cp)
	struct pciide_channel *cp;
{
	wdc_print_modes(&cp->wdc_channel);
}

void
default_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t csr;
	int channel, drive;
	struct ata_drive_datas *drvp;
	u_int8_t idedma_ctl;
	bus_size_t cmdsize, ctlsize;
	char *failreason;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
		printf("%s: bus-master DMA support present",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pp == &default_product_desc &&
		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
		    PCIIDE_OPTIONS_DMA) == 0) {
			printf(", but unused (no driver support)");
			sc->sc_dma_ok = 0;
		} else {
			pciide_mapreg_dma(sc, pa);
			if (sc->sc_dma_ok != 0)
				printf(", used without full driver "
				    "support");
		}
	} else {
		printf("%s: hardware does not support DMA",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		sc->sc_dma_ok = 0;
	}
	printf("\n");
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 0;
	sc->sc_wdcdev.DMA_cap = 0;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, pciide_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp,
			    channel, &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			continue;
		/*
		 * Check to see if something appears to be there.
		 */
		failreason = NULL;
		if (!wdcprobe(&cp->wdc_channel)) {
			failreason = "not responding; disabled or no drives?";
			goto next;
		}
		/*
		 * Now, make sure it's actually attributable to this PCI IDE
		 * channel by trying to access the channel again while the
		 * PCI IDE controller's I/O space is disabled.  (If the
		 * channel no longer appears to be there, it belongs to
		 * this controller.)  YUCK!
		 */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
		    csr & ~PCI_COMMAND_IO_ENABLE);
		if (wdcprobe(&cp->wdc_channel))
			failreason = "other hardware responding at addresses";
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG, csr);
next:
		if (failreason) {
			printf("%s: %s channel ignored (%s)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
			    failreason);
			cp->hw_ok = 0;
			bus_space_unmap(cp->wdc_channel.cmd_iot,
			    cp->wdc_channel.cmd_ioh, cmdsize);
			if (interface & PCIIDE_INTERFACE_PCI(channel))
				bus_space_unmap(cp->wdc_channel.ctl_iot,
				    cp->ctl_baseioh, ctlsize);
			else
				bus_space_unmap(cp->wdc_channel.ctl_iot,
				    cp->wdc_channel.ctl_ioh, ctlsize);
		} else {
			pciide_map_compat_intr(pa, cp, channel, interface);
		}
		if (cp->hw_ok) {
			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
			wdcattach(&cp->wdc_channel);
		}
	}

	if (sc->sc_dma_ok == 0)
		return;

	/* Allocate DMA maps */
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		idedma_ctl = 0;
		cp = &sc->pciide_channels[channel];
		for (drive = 0; drive < 2; drive++) {
			drvp = &cp->wdc_channel.ch_drive[drive];
			/* If no drive, skip */
			if ((drvp->drive_flags & DRIVE) == 0)
				continue;
			if ((drvp->drive_flags & DRIVE_DMA) == 0)
				continue;
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup */
				printf("%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive);
				drvp->drive_flags &= ~DRIVE_DMA;
			}
			printf("%s:%d:%d: using DMA data transfers\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    channel, drive);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		if (idedma_ctl != 0) {
			/* Add software bits in status register */
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
			    idedma_ctl);
		}
	}
}

void
piix_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		switch(sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82440MX_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
		case PCI_PRODUCT_INTEL_82801BA_IDE:
		case PCI_PRODUCT_INTEL_82801BAM_IDE:
		case PCI_PRODUCT_INTEL_82801CA_IDE_1:
		case PCI_PRODUCT_INTEL_82801CA_IDE_2:
		case PCI_PRODUCT_INTEL_82801DB_IDE:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		}
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	switch(sc->sc_pp->ide_product) {
	case PCI_PRODUCT_INTEL_82801AA_IDE:
		sc->sc_wdcdev.UDMA_cap = 4;
		break;
	case PCI_PRODUCT_INTEL_82801BA_IDE:
	case PCI_PRODUCT_INTEL_82801BAM_IDE:
	case PCI_PRODUCT_INTEL_82801CA_IDE_1:
	case PCI_PRODUCT_INTEL_82801CA_IDE_2:
	case PCI_PRODUCT_INTEL_82801DB_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	default:
		sc->sc_wdcdev.UDMA_cap = 2;
	}
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		/* PIIX is compat-only */
		if (pciide_chansetup(sc, channel, 0) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* PIIX are compat-only pciide devices */
		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
		}
		pciide_map_compat_intr(pa, cp, channel, 0);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}

	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}

void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess with the drives' modes: the PIIX can't have
	 * different timings for master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives support DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		drvp[0].DMA_mode = mode[0];
		drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if mode not compatible
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If neither drive uses DMA, take the lower mode, unless
	 * one of them is PIO mode < 2
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are set up */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we get here, neither drive uses DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller.  Enable
	 * it per-drive
	 */
	for (drive = 0; drive < 2; drive++) {
		/* If no drive, skip */
		if ((drvp[drive].drive_flags & DRIVE) == 0)
			continue;
		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
		if (drvp[drive].drive_flags & DRIVE_DMA)
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pciide_print_modes(cp);
}

void
piix3_4_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive;
	int channel = chp->channel;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
	    PIIX_SIDETIM_RTC_MASK(channel));

	idedma_ctl = 0;
	/* If channel disabled, no need to go further */
	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
		return;
	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
		    PIIX_UDMATIM_SET(0x3, channel, drive));
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0))
			goto pio;

		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
			ideconf |= PIIX_CONFIG_PINGPONG;
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
			/* setup Ultra/100 */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 4) {
				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
			} else {
				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
				if (drvp->UDMA_mode > 2) {
					ideconf |= PIIX_CONFIG_UDMA66(channel,
					    drive);
				} else {
					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
					    drive);
				}
			}
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
			/* setup Ultra/66 */
setup Ultra/66 */ 1840 if (drvp->UDMA_mode > 2 && 1841 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 1842 drvp->UDMA_mode = 2; 1843 if (drvp->UDMA_mode > 2) 1844 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 1845 else 1846 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 1847 } 1848 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1849 (drvp->drive_flags & DRIVE_UDMA)) { 1850 /* use Ultra/DMA */ 1851 drvp->drive_flags &= ~DRIVE_DMA; 1852 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 1853 udmareg |= PIIX_UDMATIM_SET( 1854 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 1855 } else { 1856 /* use Multiword DMA */ 1857 drvp->drive_flags &= ~DRIVE_UDMA; 1858 if (drive == 0) { 1859 idetim |= piix_setup_idetim_timings( 1860 drvp->DMA_mode, 1, channel); 1861 } else { 1862 sidetim |= piix_setup_sidetim_timings( 1863 drvp->DMA_mode, 1, channel); 1864 idetim =PIIX_IDETIM_SET(idetim, 1865 PIIX_IDETIM_SITRE, channel); 1866 } 1867 } 1868 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1869 1870 pio: /* use PIO mode */ 1871 idetim |= piix_setup_idetim_drvs(drvp); 1872 if (drive == 0) { 1873 idetim |= piix_setup_idetim_timings( 1874 drvp->PIO_mode, 0, channel); 1875 } else { 1876 sidetim |= piix_setup_sidetim_timings( 1877 drvp->PIO_mode, 0, channel); 1878 idetim =PIIX_IDETIM_SET(idetim, 1879 PIIX_IDETIM_SITRE, channel); 1880 } 1881 } 1882 if (idedma_ctl != 0) { 1883 /* Add software bits in status register */ 1884 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1885 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel), 1886 idedma_ctl); 1887 } 1888 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 1889 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 1890 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 1891 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 1892 pciide_print_modes(cp); 1893 } 1894 1895 1896 /* setup ISP and RTC fields, based on mode */ 1897 static u_int32_t 1898 piix_setup_idetim_timings(mode, dma, channel) 1899 u_int8_t mode; 1900 u_int8_t dma; 1901 u_int8_t channel; 1902 { 1903 1904 if (dma) 1905 return PIIX_IDETIM_SET(0, 1906 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 1907 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 1908 channel); 1909 else 1910 return PIIX_IDETIM_SET(0, 1911 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 1912 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 1913 channel); 1914 } 1915 1916 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 1917 static u_int32_t 1918 piix_setup_idetim_drvs(drvp) 1919 struct ata_drive_datas *drvp; 1920 { 1921 u_int32_t ret = 0; 1922 struct channel_softc *chp = drvp->chnl_softc; 1923 u_int8_t channel = chp->channel; 1924 u_int8_t drive = drvp->drive; 1925 1926 /* 1927 * If drive is using UDMA, timings setups are independant 1928 * So just check DMA and PIO here. 1929 */ 1930 if (drvp->drive_flags & DRIVE_DMA) { 1931 /* if mode = DMA mode 0, use compatible timings */ 1932 if ((drvp->drive_flags & DRIVE_DMA) && 1933 drvp->DMA_mode == 0) { 1934 drvp->PIO_mode = 0; 1935 return ret; 1936 } 1937 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 1938 /* 1939 * PIO and DMA timings are the same, use fast timings for PIO 1940 * too, else use compat timings. 
1941 */ 1942 if ((piix_isp_pio[drvp->PIO_mode] != 1943 piix_isp_dma[drvp->DMA_mode]) || 1944 (piix_rtc_pio[drvp->PIO_mode] != 1945 piix_rtc_dma[drvp->DMA_mode])) 1946 drvp->PIO_mode = 0; 1947 /* if PIO mode <= 2, use compat timings for PIO */ 1948 if (drvp->PIO_mode <= 2) { 1949 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 1950 channel); 1951 return ret; 1952 } 1953 } 1954 1955 /* 1956 * Now setup PIO modes. If mode < 2, use compat timings. 1957 * Else enable fast timings. Enable IORDY and prefetch/post 1958 * if PIO mode >= 3. 1959 */ 1960 1961 if (drvp->PIO_mode < 2) 1962 return ret; 1963 1964 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 1965 if (drvp->PIO_mode >= 3) { 1966 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 1967 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 1968 } 1969 return ret; 1970 } 1971 1972 /* setup values in SIDETIM registers, based on mode */ 1973 static u_int32_t 1974 piix_setup_sidetim_timings(mode, dma, channel) 1975 u_int8_t mode; 1976 u_int8_t dma; 1977 u_int8_t channel; 1978 { 1979 if (dma) 1980 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 1981 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel); 1982 else 1983 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 1984 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel); 1985 } 1986 1987 void 1988 amd7x6_chip_map(sc, pa) 1989 struct pciide_softc *sc; 1990 struct pci_attach_args *pa; 1991 { 1992 struct pciide_channel *cp; 1993 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1994 int channel; 1995 pcireg_t chanenable; 1996 bus_size_t cmdsize, ctlsize; 1997 1998 if (pciide_chipen(sc, pa) == 0) 1999 return; 2000 printf("%s: bus-master DMA support present", 2001 sc->sc_wdcdev.sc_dev.dv_xname); 2002 pciide_mapreg_dma(sc, pa); 2003 printf("\n"); 2004 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2005 WDC_CAPABILITY_MODE; 2006 if (sc->sc_dma_ok) { 2007 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2008 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2009 sc->sc_wdcdev.irqack = pciide_irqack; 2010 } 2011 sc->sc_wdcdev.PIO_cap = 4; 2012 sc->sc_wdcdev.DMA_cap = 2; 2013 2014 switch (sc->sc_pp->ide_product) { 2015 case PCI_PRODUCT_AMD_PBC766_IDE: 2016 case PCI_PRODUCT_AMD_PBC768_IDE: 2017 case PCI_PRODUCT_AMD_PBC8111_IDE: 2018 sc->sc_wdcdev.UDMA_cap = 5; 2019 break; 2020 default: 2021 sc->sc_wdcdev.UDMA_cap = 4; 2022 } 2023 sc->sc_wdcdev.set_modes = amd7x6_setup_channel; 2024 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2025 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2026 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN); 2027 2028 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable), 2029 DEBUG_PROBE); 2030 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2031 cp = &sc->pciide_channels[channel]; 2032 if (pciide_chansetup(sc, channel, interface) == 0) 2033 continue; 2034 2035 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) { 2036 printf("%s: %s channel ignored (disabled)\n", 2037 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2038 continue; 2039 } 2040 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2041 pciide_pci_intr); 2042 2043 if (pciide_chan_candisable(cp)) 2044 chanenable &= ~AMD7X6_CHAN_EN(channel); 2045 pciide_map_compat_intr(pa, cp, channel, interface); 2046 if (cp->hw_ok == 0) 2047 continue; 2048 2049 amd7x6_setup_channel(&cp->wdc_channel); 2050 } 2051 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN, 2052 chanenable); 2053 return; 2054 } 
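/*
 * Summary of the per-drive mode selection done in amd7x6_setup_channel()
 * below (apollo_setup_channel() follows the same pattern):
 *  - neither DRIVE_DMA nor DRIVE_UDMA set: only PIO timings are programmed;
 *  - DRIVE_UDMA set and the controller supports UDMA: DRIVE_DMA is cleared
 *    and the UDMA enable/timing bits are programmed, while the data-port
 *    pulse/recovery timings still come from the PIO mode;
 *  - otherwise multiword DMA is used with mode = min(PIO_mode, DMA_mode + 2),
 *    since the same pulse/recovery timings serve both PIO and DMA transfers
 *    (on the AMD756 a revision-dependent workaround may force PIO unless
 *    PCIIDE_AMD756_ENABLEDMA is defined);
 *  - a resulting mode <= 2 falls back to compatible (mode 0) timings.
 * Whenever a drive ends up using DMA, the corresponding software bit is set
 * in the bus-master IDEDMA_CTL status register.
 */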
2055 2056 void 2057 amd7x6_setup_channel(chp) 2058 struct channel_softc *chp; 2059 { 2060 u_int32_t udmatim_reg, datatim_reg; 2061 u_int8_t idedma_ctl; 2062 int mode, drive; 2063 struct ata_drive_datas *drvp; 2064 struct pciide_channel *cp = (struct pciide_channel*)chp; 2065 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2066 #ifndef PCIIDE_AMD756_ENABLEDMA 2067 int rev = PCI_REVISION( 2068 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 2069 #endif 2070 2071 idedma_ctl = 0; 2072 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM); 2073 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA); 2074 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel); 2075 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel); 2076 2077 /* setup DMA if needed */ 2078 pciide_channel_dma_setup(cp); 2079 2080 for (drive = 0; drive < 2; drive++) { 2081 drvp = &chp->ch_drive[drive]; 2082 /* If no drive, skip */ 2083 if ((drvp->drive_flags & DRIVE) == 0) 2084 continue; 2085 /* add timing values, setup DMA if needed */ 2086 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2087 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2088 mode = drvp->PIO_mode; 2089 goto pio; 2090 } 2091 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2092 (drvp->drive_flags & DRIVE_UDMA)) { 2093 /* use Ultra/DMA */ 2094 drvp->drive_flags &= ~DRIVE_DMA; 2095 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) | 2096 AMD7X6_UDMA_EN_MTH(chp->channel, drive) | 2097 AMD7X6_UDMA_TIME(chp->channel, drive, 2098 amd7x6_udma_tim[drvp->UDMA_mode]); 2099 /* can use PIO timings, MW DMA unused */ 2100 mode = drvp->PIO_mode; 2101 } else { 2102 /* use Multiword DMA, but only if revision is OK */ 2103 drvp->drive_flags &= ~DRIVE_UDMA; 2104 #ifndef PCIIDE_AMD756_ENABLEDMA 2105 /* 2106 * The workaround doesn't seem to be necessary 2107 * with all drives, so it can be disabled by 2108 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 2109 * triggered. 
2110 */ 2111 if (sc->sc_pp->ide_product == 2112 PCI_PRODUCT_AMD_PBC756_IDE && 2113 AMD756_CHIPREV_DISABLEDMA(rev)) { 2114 printf("%s:%d:%d: multi-word DMA disabled due " 2115 "to chip revision\n", 2116 sc->sc_wdcdev.sc_dev.dv_xname, 2117 chp->channel, drive); 2118 mode = drvp->PIO_mode; 2119 drvp->drive_flags &= ~DRIVE_DMA; 2120 goto pio; 2121 } 2122 #endif 2123 /* mode = min(pio, dma+2) */ 2124 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2125 mode = drvp->PIO_mode; 2126 else 2127 mode = drvp->DMA_mode + 2; 2128 } 2129 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2130 2131 pio: /* setup PIO mode */ 2132 if (mode <= 2) { 2133 drvp->DMA_mode = 0; 2134 drvp->PIO_mode = 0; 2135 mode = 0; 2136 } else { 2137 drvp->PIO_mode = mode; 2138 drvp->DMA_mode = mode - 2; 2139 } 2140 datatim_reg |= 2141 AMD7X6_DATATIM_PULSE(chp->channel, drive, 2142 amd7x6_pio_set[mode]) | 2143 AMD7X6_DATATIM_RECOV(chp->channel, drive, 2144 amd7x6_pio_rec[mode]); 2145 } 2146 if (idedma_ctl != 0) { 2147 /* Add software bits in status register */ 2148 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2149 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2150 idedma_ctl); 2151 } 2152 pciide_print_modes(cp); 2153 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg); 2154 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg); 2155 } 2156 2157 void 2158 apollo_chip_map(sc, pa) 2159 struct pciide_softc *sc; 2160 struct pci_attach_args *pa; 2161 { 2162 struct pciide_channel *cp; 2163 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2164 int channel; 2165 u_int32_t ideconf; 2166 bus_size_t cmdsize, ctlsize; 2167 pcitag_t pcib_tag; 2168 pcireg_t pcib_id, pcib_class; 2169 2170 if (pciide_chipen(sc, pa) == 0) 2171 return; 2172 /* get a PCI tag for the ISA bridge (function 0 of the same device) */ 2173 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 2174 /* and read ID and rev of the ISA bridge */ 2175 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG); 2176 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG); 2177 printf(": VIA Technologies "); 2178 switch (PCI_PRODUCT(pcib_id)) { 2179 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 2180 printf("VT82C586 (Apollo VP) "); 2181 if(PCI_REVISION(pcib_class) >= 0x02) { 2182 printf("ATA33 controller\n"); 2183 sc->sc_wdcdev.UDMA_cap = 2; 2184 } else { 2185 printf("controller\n"); 2186 sc->sc_wdcdev.UDMA_cap = 0; 2187 } 2188 break; 2189 case PCI_PRODUCT_VIATECH_VT82C596A: 2190 printf("VT82C596A (Apollo Pro) "); 2191 if (PCI_REVISION(pcib_class) >= 0x12) { 2192 printf("ATA66 controller\n"); 2193 sc->sc_wdcdev.UDMA_cap = 4; 2194 } else { 2195 printf("ATA33 controller\n"); 2196 sc->sc_wdcdev.UDMA_cap = 2; 2197 } 2198 break; 2199 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 2200 printf("VT82C686A (Apollo KX133) "); 2201 if (PCI_REVISION(pcib_class) >= 0x40) { 2202 printf("ATA100 controller\n"); 2203 sc->sc_wdcdev.UDMA_cap = 5; 2204 } else { 2205 printf("ATA66 controller\n"); 2206 sc->sc_wdcdev.UDMA_cap = 4; 2207 } 2208 break; 2209 case PCI_PRODUCT_VIATECH_VT8231: 2210 printf("VT8231 ATA100 controller\n"); 2211 sc->sc_wdcdev.UDMA_cap = 5; 2212 break; 2213 case PCI_PRODUCT_VIATECH_VT8233: 2214 printf("VT8233 ATA100 controller\n"); 2215 sc->sc_wdcdev.UDMA_cap = 5; 2216 break; 2217 case PCI_PRODUCT_VIATECH_VT8233A: 2218 printf("VT8233A ATA133 controller\n"); 2219 sc->sc_wdcdev.UDMA_cap = 6; 2220 break; 2221 default: 2222 printf("unknown ATA controller\n"); 2223 sc->sc_wdcdev.UDMA_cap = 0; 2224 } 2225 2226 printf("%s: bus-master DMA support present", 2227 
sc->sc_wdcdev.sc_dev.dv_xname); 2228 pciide_mapreg_dma(sc, pa); 2229 printf("\n"); 2230 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2231 WDC_CAPABILITY_MODE; 2232 if (sc->sc_dma_ok) { 2233 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2234 sc->sc_wdcdev.irqack = pciide_irqack; 2235 if (sc->sc_wdcdev.UDMA_cap > 0) 2236 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2237 } 2238 sc->sc_wdcdev.PIO_cap = 4; 2239 sc->sc_wdcdev.DMA_cap = 2; 2240 sc->sc_wdcdev.set_modes = apollo_setup_channel; 2241 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2242 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2243 2244 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 2245 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2246 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 2247 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 2248 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2249 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 2250 DEBUG_PROBE); 2251 2252 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2253 cp = &sc->pciide_channels[channel]; 2254 if (pciide_chansetup(sc, channel, interface) == 0) 2255 continue; 2256 2257 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 2258 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 2259 printf("%s: %s channel ignored (disabled)\n", 2260 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2261 continue; 2262 } 2263 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2264 pciide_pci_intr); 2265 if (cp->hw_ok == 0) 2266 continue; 2267 if (pciide_chan_candisable(cp)) { 2268 ideconf &= ~APO_IDECONF_EN(channel); 2269 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 2270 ideconf); 2271 } 2272 pciide_map_compat_intr(pa, cp, channel, interface); 2273 2274 if (cp->hw_ok == 0) 2275 continue; 2276 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 2277 } 2278 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2279 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2280 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 2281 } 2282 2283 void 2284 apollo_setup_channel(chp) 2285 struct channel_softc *chp; 2286 { 2287 u_int32_t udmatim_reg, datatim_reg; 2288 u_int8_t idedma_ctl; 2289 int mode, drive; 2290 struct ata_drive_datas *drvp; 2291 struct pciide_channel *cp = (struct pciide_channel*)chp; 2292 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2293 2294 idedma_ctl = 0; 2295 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 2296 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 2297 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 2298 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 2299 2300 /* setup DMA if needed */ 2301 pciide_channel_dma_setup(cp); 2302 2303 for (drive = 0; drive < 2; drive++) { 2304 drvp = &chp->ch_drive[drive]; 2305 /* If no drive, skip */ 2306 if ((drvp->drive_flags & DRIVE) == 0) 2307 continue; 2308 /* add timing values, setup DMA if needed */ 2309 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2310 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2311 mode = drvp->PIO_mode; 2312 goto pio; 2313 } 2314 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2315 (drvp->drive_flags & DRIVE_UDMA)) { 2316 /* use Ultra/DMA */ 2317 drvp->drive_flags &= ~DRIVE_DMA; 2318 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 2319 APO_UDMA_EN_MTH(chp->channel, drive); 2320 if (sc->sc_wdcdev.UDMA_cap == 6) { 2321 /* 8233a */ 2322 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2323 drive, apollo_udma133_tim[drvp->UDMA_mode]); 
2324 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 2325 /* 686b */ 2326 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2327 drive, apollo_udma100_tim[drvp->UDMA_mode]); 2328 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 2329 /* 596b or 686a */ 2330 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 2331 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2332 drive, apollo_udma66_tim[drvp->UDMA_mode]); 2333 } else { 2334 /* 596a or 586b */ 2335 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2336 drive, apollo_udma33_tim[drvp->UDMA_mode]); 2337 } 2338 /* can use PIO timings, MW DMA unused */ 2339 mode = drvp->PIO_mode; 2340 } else { 2341 /* use Multiword DMA */ 2342 drvp->drive_flags &= ~DRIVE_UDMA; 2343 /* mode = min(pio, dma+2) */ 2344 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2345 mode = drvp->PIO_mode; 2346 else 2347 mode = drvp->DMA_mode + 2; 2348 } 2349 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2350 2351 pio: /* setup PIO mode */ 2352 if (mode <= 2) { 2353 drvp->DMA_mode = 0; 2354 drvp->PIO_mode = 0; 2355 mode = 0; 2356 } else { 2357 drvp->PIO_mode = mode; 2358 drvp->DMA_mode = mode - 2; 2359 } 2360 datatim_reg |= 2361 APO_DATATIM_PULSE(chp->channel, drive, 2362 apollo_pio_set[mode]) | 2363 APO_DATATIM_RECOV(chp->channel, drive, 2364 apollo_pio_rec[mode]); 2365 } 2366 if (idedma_ctl != 0) { 2367 /* Add software bits in status register */ 2368 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2369 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2370 idedma_ctl); 2371 } 2372 pciide_print_modes(cp); 2373 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 2374 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 2375 } 2376 2377 void 2378 cmd_channel_map(pa, sc, channel) 2379 struct pci_attach_args *pa; 2380 struct pciide_softc *sc; 2381 int channel; 2382 { 2383 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2384 bus_size_t cmdsize, ctlsize; 2385 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 2386 int interface, one_channel; 2387 2388 /* 2389 * The 0648/0649 can be told to identify as a RAID controller. 2390 * In this case, we have to fake interface 2391 */ 2392 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2393 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2394 PCIIDE_INTERFACE_SETTABLE(1); 2395 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2396 CMD_CONF_DSA1) 2397 interface |= PCIIDE_INTERFACE_PCI(0) | 2398 PCIIDE_INTERFACE_PCI(1); 2399 } else { 2400 interface = PCI_INTERFACE(pa->pa_class); 2401 } 2402 2403 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2404 cp->name = PCIIDE_CHANNEL_NAME(channel); 2405 cp->wdc_channel.channel = channel; 2406 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2407 2408 /* 2409 * Older CMD64X doesn't have independant channels 2410 */ 2411 switch (sc->sc_pp->ide_product) { 2412 case PCI_PRODUCT_CMDTECH_649: 2413 one_channel = 0; 2414 break; 2415 default: 2416 one_channel = 1; 2417 break; 2418 } 2419 2420 if (channel > 0 && one_channel) { 2421 cp->wdc_channel.ch_queue = 2422 sc->pciide_channels[0].wdc_channel.ch_queue; 2423 } else { 2424 cp->wdc_channel.ch_queue = 2425 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2426 } 2427 if (cp->wdc_channel.ch_queue == NULL) { 2428 printf("%s %s channel: " 2429 "can't allocate memory for command queue", 2430 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2431 return; 2432 } 2433 2434 printf("%s: %s channel %s to %s mode\n", 2435 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2436 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 
2437 "configured" : "wired", 2438 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 2439 "native-PCI" : "compatibility"); 2440 2441 /* 2442 * with a CMD PCI64x, if we get here, the first channel is enabled: 2443 * there's no way to disable the first channel without disabling 2444 * the whole device 2445 */ 2446 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 2447 printf("%s: %s channel ignored (disabled)\n", 2448 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2449 return; 2450 } 2451 2452 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 2453 if (cp->hw_ok == 0) 2454 return; 2455 if (channel == 1) { 2456 if (pciide_chan_candisable(cp)) { 2457 ctrl &= ~CMD_CTRL_2PORT; 2458 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2459 CMD_CTRL, ctrl); 2460 } 2461 } 2462 pciide_map_compat_intr(pa, cp, channel, interface); 2463 } 2464 2465 int 2466 cmd_pci_intr(arg) 2467 void *arg; 2468 { 2469 struct pciide_softc *sc = arg; 2470 struct pciide_channel *cp; 2471 struct channel_softc *wdc_cp; 2472 int i, rv, crv; 2473 u_int32_t priirq, secirq; 2474 2475 rv = 0; 2476 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2477 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2478 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2479 cp = &sc->pciide_channels[i]; 2480 wdc_cp = &cp->wdc_channel; 2481 /* If a compat channel skip. */ 2482 if (cp->compat) 2483 continue; 2484 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 2485 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 2486 crv = wdcintr(wdc_cp); 2487 if (crv == 0) 2488 printf("%s:%d: bogus intr\n", 2489 sc->sc_wdcdev.sc_dev.dv_xname, i); 2490 else 2491 rv = 1; 2492 } 2493 } 2494 return rv; 2495 } 2496 2497 void 2498 cmd_chip_map(sc, pa) 2499 struct pciide_softc *sc; 2500 struct pci_attach_args *pa; 2501 { 2502 int channel; 2503 2504 /* 2505 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2506 * and base adresses registers can be disabled at 2507 * hardware level. In this case, the device is wired 2508 * in compat mode and its first channel is always enabled, 2509 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2510 * In fact, it seems that the first channel of the CMD PCI0640 2511 * can't be disabled. 2512 */ 2513 2514 #ifdef PCIIDE_CMD064x_DISABLE 2515 if (pciide_chipen(sc, pa) == 0) 2516 return; 2517 #endif 2518 2519 printf("%s: hardware does not support DMA\n", 2520 sc->sc_wdcdev.sc_dev.dv_xname); 2521 sc->sc_dma_ok = 0; 2522 2523 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2524 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2525 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 2526 2527 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2528 cmd_channel_map(pa, sc, channel); 2529 } 2530 } 2531 2532 void 2533 cmd0643_9_chip_map(sc, pa) 2534 struct pciide_softc *sc; 2535 struct pci_attach_args *pa; 2536 { 2537 struct pciide_channel *cp; 2538 int channel; 2539 pcireg_t rev = PCI_REVISION(pa->pa_class); 2540 2541 /* 2542 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2543 * and base adresses registers can be disabled at 2544 * hardware level. In this case, the device is wired 2545 * in compat mode and its first channel is always enabled, 2546 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2547 * In fact, it seems that the first channel of the CMD PCI0640 2548 * can't be disabled. 
2549 */ 2550 2551 #ifdef PCIIDE_CMD064x_DISABLE 2552 if (pciide_chipen(sc, pa) == 0) 2553 return; 2554 #endif 2555 printf("%s: bus-master DMA support present", 2556 sc->sc_wdcdev.sc_dev.dv_xname); 2557 pciide_mapreg_dma(sc, pa); 2558 printf("\n"); 2559 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2560 WDC_CAPABILITY_MODE; 2561 if (sc->sc_dma_ok) { 2562 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2563 switch (sc->sc_pp->ide_product) { 2564 case PCI_PRODUCT_CMDTECH_649: 2565 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2566 sc->sc_wdcdev.UDMA_cap = 5; 2567 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2568 break; 2569 case PCI_PRODUCT_CMDTECH_648: 2570 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2571 sc->sc_wdcdev.UDMA_cap = 4; 2572 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2573 break; 2574 case PCI_PRODUCT_CMDTECH_646: 2575 if (rev >= CMD0646U2_REV) { 2576 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2577 sc->sc_wdcdev.UDMA_cap = 2; 2578 } else if (rev >= CMD0646U_REV) { 2579 /* 2580 * Linux's driver claims that the 646U is broken 2581 * with UDMA. Only enable it if we know what we're 2582 * doing 2583 */ 2584 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 2585 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2586 sc->sc_wdcdev.UDMA_cap = 2; 2587 #endif 2588 /* explicitly disable UDMA */ 2589 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2590 CMD_UDMATIM(0), 0); 2591 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2592 CMD_UDMATIM(1), 0); 2593 } 2594 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2595 break; 2596 default: 2597 sc->sc_wdcdev.irqack = pciide_irqack; 2598 } 2599 } 2600 2601 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2602 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2603 sc->sc_wdcdev.PIO_cap = 4; 2604 sc->sc_wdcdev.DMA_cap = 2; 2605 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 2606 2607 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 2608 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2609 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2610 DEBUG_PROBE); 2611 2612 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2613 cp = &sc->pciide_channels[channel]; 2614 cmd_channel_map(pa, sc, channel); 2615 if (cp->hw_ok == 0) 2616 continue; 2617 cmd0643_9_setup_channel(&cp->wdc_channel); 2618 } 2619 /* 2620 * note - this also makes sure we clear the irq disable and reset 2621 * bits 2622 */ 2623 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 2624 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 2625 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2626 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2627 DEBUG_PROBE); 2628 } 2629 2630 void 2631 cmd0643_9_setup_channel(chp) 2632 struct channel_softc *chp; 2633 { 2634 struct ata_drive_datas *drvp; 2635 u_int8_t tim; 2636 u_int32_t idedma_ctl, udma_reg; 2637 int drive; 2638 struct pciide_channel *cp = (struct pciide_channel*)chp; 2639 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2640 2641 idedma_ctl = 0; 2642 /* setup DMA if needed */ 2643 pciide_channel_dma_setup(cp); 2644 2645 for (drive = 0; drive < 2; drive++) { 2646 drvp = &chp->ch_drive[drive]; 2647 /* If no drive, skip */ 2648 if ((drvp->drive_flags & DRIVE) == 0) 2649 continue; 2650 /* add timing values, setup DMA if needed */ 2651 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 2652 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 2653 if (drvp->drive_flags & DRIVE_UDMA) { 2654 /* UltraDMA on a 646U2, 0648 or 0649 */ 2655 drvp->drive_flags &= ~DRIVE_DMA; 2656 udma_reg = 
pciide_pci_read(sc->sc_pc, 2657 sc->sc_tag, CMD_UDMATIM(chp->channel)); 2658 if (drvp->UDMA_mode > 2 && 2659 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 2660 CMD_BICSR) & 2661 CMD_BICSR_80(chp->channel)) == 0) 2662 drvp->UDMA_mode = 2; 2663 if (drvp->UDMA_mode > 2) 2664 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 2665 else if (sc->sc_wdcdev.UDMA_cap > 2) 2666 udma_reg |= CMD_UDMATIM_UDMA33(drive); 2667 udma_reg |= CMD_UDMATIM_UDMA(drive); 2668 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 2669 CMD_UDMATIM_TIM_OFF(drive)); 2670 udma_reg |= 2671 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 2672 CMD_UDMATIM_TIM_OFF(drive)); 2673 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2674 CMD_UDMATIM(chp->channel), udma_reg); 2675 } else { 2676 /* 2677 * use Multiword DMA. 2678 * Timings will be used for both PIO and DMA, 2679 * so adjust DMA mode if needed 2680 * if we have a 0646U2/8/9, turn off UDMA 2681 */ 2682 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2683 udma_reg = pciide_pci_read(sc->sc_pc, 2684 sc->sc_tag, 2685 CMD_UDMATIM(chp->channel)); 2686 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 2687 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2688 CMD_UDMATIM(chp->channel), 2689 udma_reg); 2690 } 2691 if (drvp->PIO_mode >= 3 && 2692 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 2693 drvp->DMA_mode = drvp->PIO_mode - 2; 2694 } 2695 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 2696 } 2697 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2698 } 2699 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2700 CMD_DATA_TIM(chp->channel, drive), tim); 2701 } 2702 if (idedma_ctl != 0) { 2703 /* Add software bits in status register */ 2704 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2705 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2706 idedma_ctl); 2707 } 2708 pciide_print_modes(cp); 2709 } 2710 2711 void 2712 cmd646_9_irqack(chp) 2713 struct channel_softc *chp; 2714 { 2715 u_int32_t priirq, secirq; 2716 struct pciide_channel *cp = (struct pciide_channel*)chp; 2717 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2718 2719 if (chp->channel == 0) { 2720 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2721 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 2722 } else { 2723 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2724 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 2725 } 2726 pciide_irqack(chp); 2727 } 2728 2729 void 2730 cmd680_chip_map(sc, pa) 2731 struct pciide_softc *sc; 2732 struct pci_attach_args *pa; 2733 { 2734 struct pciide_channel *cp; 2735 int channel; 2736 2737 if (pciide_chipen(sc, pa) == 0) 2738 return; 2739 printf("%s: bus-master DMA support present", 2740 sc->sc_wdcdev.sc_dev.dv_xname); 2741 pciide_mapreg_dma(sc, pa); 2742 printf("\n"); 2743 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2744 WDC_CAPABILITY_MODE; 2745 if (sc->sc_dma_ok) { 2746 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2747 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2748 sc->sc_wdcdev.UDMA_cap = 6; 2749 sc->sc_wdcdev.irqack = pciide_irqack; 2750 } 2751 2752 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2753 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2754 sc->sc_wdcdev.PIO_cap = 4; 2755 sc->sc_wdcdev.DMA_cap = 2; 2756 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 2757 2758 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 2759 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 2760 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 2761 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 2762 for (channel = 0; channel < sc->sc_wdcdev.nchannels; 
channel++) { 2763 cp = &sc->pciide_channels[channel]; 2764 cmd680_channel_map(pa, sc, channel); 2765 if (cp->hw_ok == 0) 2766 continue; 2767 cmd680_setup_channel(&cp->wdc_channel); 2768 } 2769 } 2770 2771 void 2772 cmd680_channel_map(pa, sc, channel) 2773 struct pci_attach_args *pa; 2774 struct pciide_softc *sc; 2775 int channel; 2776 { 2777 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2778 bus_size_t cmdsize, ctlsize; 2779 int interface, i, reg; 2780 static const u_int8_t init_val[] = 2781 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 2782 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 2783 2784 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2785 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2786 PCIIDE_INTERFACE_SETTABLE(1); 2787 interface |= PCIIDE_INTERFACE_PCI(0) | 2788 PCIIDE_INTERFACE_PCI(1); 2789 } else { 2790 interface = PCI_INTERFACE(pa->pa_class); 2791 } 2792 2793 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2794 cp->name = PCIIDE_CHANNEL_NAME(channel); 2795 cp->wdc_channel.channel = channel; 2796 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2797 2798 cp->wdc_channel.ch_queue = 2799 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2800 if (cp->wdc_channel.ch_queue == NULL) { 2801 printf("%s %s channel: " 2802 "can't allocate memory for command queue", 2803 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2804 return; 2805 } 2806 2807 /* XXX */ 2808 reg = 0xa2 + channel * 16; 2809 for (i = 0; i < sizeof(init_val); i++) 2810 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 2811 2812 printf("%s: %s channel %s to %s mode\n", 2813 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2814 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 2815 "configured" : "wired", 2816 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 2817 "native-PCI" : "compatibility"); 2818 2819 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 2820 if (cp->hw_ok == 0) 2821 return; 2822 pciide_map_compat_intr(pa, cp, channel, interface); 2823 } 2824 2825 void 2826 cmd680_setup_channel(chp) 2827 struct channel_softc *chp; 2828 { 2829 struct ata_drive_datas *drvp; 2830 u_int8_t mode, off, scsc; 2831 u_int16_t val; 2832 u_int32_t idedma_ctl; 2833 int drive; 2834 struct pciide_channel *cp = (struct pciide_channel*)chp; 2835 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2836 pci_chipset_tag_t pc = sc->sc_pc; 2837 pcitag_t pa = sc->sc_tag; 2838 static const u_int8_t udma2_tbl[] = 2839 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 2840 static const u_int8_t udma_tbl[] = 2841 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 2842 static const u_int16_t dma_tbl[] = 2843 { 0x2208, 0x10c2, 0x10c1 }; 2844 static const u_int16_t pio_tbl[] = 2845 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 2846 2847 idedma_ctl = 0; 2848 pciide_channel_dma_setup(cp); 2849 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 2850 2851 for (drive = 0; drive < 2; drive++) { 2852 drvp = &chp->ch_drive[drive]; 2853 /* If no drive, skip */ 2854 if ((drvp->drive_flags & DRIVE) == 0) 2855 continue; 2856 mode &= ~(0x03 << (drive * 4)); 2857 if (drvp->drive_flags & DRIVE_UDMA) { 2858 drvp->drive_flags &= ~DRIVE_DMA; 2859 off = 0xa0 + chp->channel * 16; 2860 if (drvp->UDMA_mode > 2 && 2861 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 2862 drvp->UDMA_mode = 2; 2863 scsc = pciide_pci_read(pc, pa, 0x8a); 2864 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 2865 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 2866 scsc = pciide_pci_read(pc, pa, 0x8a); 2867 if ((scsc & 0x30) == 0) 2868 
drvp->UDMA_mode = 5; 2869 } 2870 mode |= 0x03 << (drive * 4); 2871 off = 0xac + chp->channel * 16 + drive * 2; 2872 val = pciide_pci_read(pc, pa, off) & ~0x3f; 2873 if (scsc & 0x30) 2874 val |= udma2_tbl[drvp->UDMA_mode]; 2875 else 2876 val |= udma_tbl[drvp->UDMA_mode]; 2877 pciide_pci_write(pc, pa, off, val); 2878 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2879 } else if (drvp->drive_flags & DRIVE_DMA) { 2880 mode |= 0x02 << (drive * 4); 2881 off = 0xa8 + chp->channel * 16 + drive * 2; 2882 val = dma_tbl[drvp->DMA_mode]; 2883 pciide_pci_write(pc, pa, off, val & 0xff); 2884 pciide_pci_write(pc, pa, off, val >> 8); 2885 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2886 } else { 2887 mode |= 0x01 << (drive * 4); 2888 off = 0xa4 + chp->channel * 16 + drive * 2; 2889 val = pio_tbl[drvp->PIO_mode]; 2890 pciide_pci_write(pc, pa, off, val & 0xff); 2891 pciide_pci_write(pc, pa, off, val >> 8); 2892 } 2893 } 2894 2895 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode); 2896 if (idedma_ctl != 0) { 2897 /* Add software bits in status register */ 2898 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2899 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2900 idedma_ctl); 2901 } 2902 pciide_print_modes(cp); 2903 } 2904 2905 void 2906 cy693_chip_map(sc, pa) 2907 struct pciide_softc *sc; 2908 struct pci_attach_args *pa; 2909 { 2910 struct pciide_channel *cp; 2911 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2912 bus_size_t cmdsize, ctlsize; 2913 2914 if (pciide_chipen(sc, pa) == 0) 2915 return; 2916 /* 2917 * this chip has 2 PCI IDE functions, one for primary and one for 2918 * secondary. So we need to call pciide_mapregs_compat() with 2919 * the real channel 2920 */ 2921 if (pa->pa_function == 1) { 2922 sc->sc_cy_compatchan = 0; 2923 } else if (pa->pa_function == 2) { 2924 sc->sc_cy_compatchan = 1; 2925 } else { 2926 printf("%s: unexpected PCI function %d\n", 2927 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2928 return; 2929 } 2930 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2931 printf("%s: bus-master DMA support present", 2932 sc->sc_wdcdev.sc_dev.dv_xname); 2933 pciide_mapreg_dma(sc, pa); 2934 } else { 2935 printf("%s: hardware does not support DMA", 2936 sc->sc_wdcdev.sc_dev.dv_xname); 2937 sc->sc_dma_ok = 0; 2938 } 2939 printf("\n"); 2940 2941 sc->sc_cy_handle = cy82c693_init(pa->pa_iot); 2942 if (sc->sc_cy_handle == NULL) { 2943 printf("%s: unable to map hyperCache control registers\n", 2944 sc->sc_wdcdev.sc_dev.dv_xname); 2945 sc->sc_dma_ok = 0; 2946 } 2947 2948 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2949 WDC_CAPABILITY_MODE; 2950 if (sc->sc_dma_ok) { 2951 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2952 sc->sc_wdcdev.irqack = pciide_irqack; 2953 } 2954 sc->sc_wdcdev.PIO_cap = 4; 2955 sc->sc_wdcdev.DMA_cap = 2; 2956 sc->sc_wdcdev.set_modes = cy693_setup_channel; 2957 2958 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2959 sc->sc_wdcdev.nchannels = 1; 2960 2961 /* Only one channel for this chip; if we are here it's enabled */ 2962 cp = &sc->pciide_channels[0]; 2963 sc->wdc_chanarray[0] = &cp->wdc_channel; 2964 cp->name = PCIIDE_CHANNEL_NAME(0); 2965 cp->wdc_channel.channel = 0; 2966 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2967 cp->wdc_channel.ch_queue = 2968 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2969 if (cp->wdc_channel.ch_queue == NULL) { 2970 printf("%s primary channel: " 2971 "can't allocate memory for command queue", 2972 sc->sc_wdcdev.sc_dev.dv_xname); 2973 return; 2974 } 2975 printf("%s: primary channel %s to 
", 2976 sc->sc_wdcdev.sc_dev.dv_xname, 2977 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 2978 "configured" : "wired"); 2979 if (interface & PCIIDE_INTERFACE_PCI(0)) { 2980 printf("native-PCI"); 2981 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 2982 pciide_pci_intr); 2983 } else { 2984 printf("compatibility"); 2985 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, 2986 &cmdsize, &ctlsize); 2987 } 2988 printf(" mode\n"); 2989 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2990 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2991 wdcattach(&cp->wdc_channel); 2992 if (pciide_chan_candisable(cp)) { 2993 pci_conf_write(sc->sc_pc, sc->sc_tag, 2994 PCI_COMMAND_STATUS_REG, 0); 2995 } 2996 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface); 2997 if (cp->hw_ok == 0) 2998 return; 2999 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 3000 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE); 3001 cy693_setup_channel(&cp->wdc_channel); 3002 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 3003 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 3004 } 3005 3006 void 3007 cy693_setup_channel(chp) 3008 struct channel_softc *chp; 3009 { 3010 struct ata_drive_datas *drvp; 3011 int drive; 3012 u_int32_t cy_cmd_ctrl; 3013 u_int32_t idedma_ctl; 3014 struct pciide_channel *cp = (struct pciide_channel*)chp; 3015 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3016 int dma_mode = -1; 3017 3018 cy_cmd_ctrl = idedma_ctl = 0; 3019 3020 /* setup DMA if needed */ 3021 pciide_channel_dma_setup(cp); 3022 3023 for (drive = 0; drive < 2; drive++) { 3024 drvp = &chp->ch_drive[drive]; 3025 /* If no drive, skip */ 3026 if ((drvp->drive_flags & DRIVE) == 0) 3027 continue; 3028 /* add timing values, setup DMA if needed */ 3029 if (drvp->drive_flags & DRIVE_DMA) { 3030 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3031 /* use Multiword DMA */ 3032 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 3033 dma_mode = drvp->DMA_mode; 3034 } 3035 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 3036 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 3037 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 3038 CY_CMD_CTRL_IOW_REC_OFF(drive)); 3039 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 3040 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 3041 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 3042 CY_CMD_CTRL_IOR_REC_OFF(drive)); 3043 } 3044 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 3045 chp->ch_drive[0].DMA_mode = dma_mode; 3046 chp->ch_drive[1].DMA_mode = dma_mode; 3047 3048 if (dma_mode == -1) 3049 dma_mode = 0; 3050 3051 if (sc->sc_cy_handle != NULL) { 3052 /* Note: `multiple' is implied. */ 3053 cy82c693_write(sc->sc_cy_handle, 3054 (sc->sc_cy_compatchan == 0) ? 
	    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
	}

	pciide_print_modes(cp);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
}

static int
sis_hostbr_match(pa)
	struct pci_attach_args *pa;
{
	return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
	    ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735) ||
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_745)));
}

void
sis_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	bus_size_t cmdsize, ctlsize;
	pcitag_t pchb_tag;
	pcireg_t pchb_id, pchb_class;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");

	/* get a PCI tag for the host bridge (function 0 of the same device) */
	pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	/* and read the ID and revision of the host bridge */
	pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
	pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/*
		 * controllers associated with a rev 0x2 SiS 530 host-to-PCI
		 * bridge have problems with UDMA (info provided by Christos)
		 */
		if (rev >= 0xd0 &&
		    (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
		    PCI_REVISION(pchb_class) >= 0x03))
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
		/*
		 * Use UDMA/100 if the host bridge is one of the newer SiS
		 * chipsets matched above (645/650/730/735/745), UDMA/33
		 * otherwise.
		 */
		sc->sc_wdcdev.UDMA_cap =
		    pci_find_device(pa, sis_hostbr_match) ?
5 : 2; 3130 sc->sc_wdcdev.set_modes = sis_setup_channel; 3131 3132 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3133 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3134 3135 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 3136 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 3137 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE); 3138 3139 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3140 cp = &sc->pciide_channels[channel]; 3141 if (pciide_chansetup(sc, channel, interface) == 0) 3142 continue; 3143 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 3144 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 3145 printf("%s: %s channel ignored (disabled)\n", 3146 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3147 continue; 3148 } 3149 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3150 pciide_pci_intr); 3151 if (cp->hw_ok == 0) 3152 continue; 3153 if (pciide_chan_candisable(cp)) { 3154 if (channel == 0) 3155 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 3156 else 3157 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 3158 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 3159 sis_ctr0); 3160 } 3161 pciide_map_compat_intr(pa, cp, channel, interface); 3162 if (cp->hw_ok == 0) 3163 continue; 3164 sis_setup_channel(&cp->wdc_channel); 3165 } 3166 } 3167 3168 void 3169 sis_setup_channel(chp) 3170 struct channel_softc *chp; 3171 { 3172 struct ata_drive_datas *drvp; 3173 int drive; 3174 u_int32_t sis_tim; 3175 u_int32_t idedma_ctl; 3176 struct pciide_channel *cp = (struct pciide_channel*)chp; 3177 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3178 3179 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 3180 "channel %d 0x%x\n", chp->channel, 3181 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 3182 DEBUG_PROBE); 3183 sis_tim = 0; 3184 idedma_ctl = 0; 3185 /* setup DMA if needed */ 3186 pciide_channel_dma_setup(cp); 3187 3188 for (drive = 0; drive < 2; drive++) { 3189 drvp = &chp->ch_drive[drive]; 3190 /* If no drive, skip */ 3191 if ((drvp->drive_flags & DRIVE) == 0) 3192 continue; 3193 /* add timing values, setup DMA if needed */ 3194 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 3195 (drvp->drive_flags & DRIVE_UDMA) == 0) 3196 goto pio; 3197 3198 if (drvp->drive_flags & DRIVE_UDMA) { 3199 /* use Ultra/DMA */ 3200 drvp->drive_flags &= ~DRIVE_DMA; 3201 sis_tim |= sis_udma_tim[drvp->UDMA_mode] << 3202 SIS_TIM_UDMA_TIME_OFF(drive); 3203 sis_tim |= SIS_TIM_UDMA_EN(drive); 3204 } else { 3205 /* 3206 * use Multiword DMA 3207 * Timings will be used for both PIO and DMA, 3208 * so adjust DMA mode if needed 3209 */ 3210 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3211 drvp->PIO_mode = drvp->DMA_mode + 2; 3212 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3213 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
3214 drvp->PIO_mode - 2 : 0; 3215 if (drvp->DMA_mode == 0) 3216 drvp->PIO_mode = 0; 3217 } 3218 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3219 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] << 3220 SIS_TIM_ACT_OFF(drive); 3221 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 3222 SIS_TIM_REC_OFF(drive); 3223 } 3224 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 3225 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 3226 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 3227 if (idedma_ctl != 0) { 3228 /* Add software bits in status register */ 3229 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3230 IDEDMA_CTL, idedma_ctl); 3231 } 3232 pciide_print_modes(cp); 3233 } 3234 3235 void 3236 acer_chip_map(sc, pa) 3237 struct pciide_softc *sc; 3238 struct pci_attach_args *pa; 3239 { 3240 struct pciide_channel *cp; 3241 int channel; 3242 pcireg_t cr, interface; 3243 bus_size_t cmdsize, ctlsize; 3244 pcireg_t rev = PCI_REVISION(pa->pa_class); 3245 3246 if (pciide_chipen(sc, pa) == 0) 3247 return; 3248 printf("%s: bus-master DMA support present", 3249 sc->sc_wdcdev.sc_dev.dv_xname); 3250 pciide_mapreg_dma(sc, pa); 3251 printf("\n"); 3252 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3253 WDC_CAPABILITY_MODE; 3254 if (sc->sc_dma_ok) { 3255 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 3256 if (rev >= 0x20) { 3257 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3258 if (rev >= 0xC4) 3259 sc->sc_wdcdev.UDMA_cap = 5; 3260 else if (rev >= 0xC2) 3261 sc->sc_wdcdev.UDMA_cap = 4; 3262 else 3263 sc->sc_wdcdev.UDMA_cap = 2; 3264 } 3265 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3266 sc->sc_wdcdev.irqack = pciide_irqack; 3267 } 3268 3269 sc->sc_wdcdev.PIO_cap = 4; 3270 sc->sc_wdcdev.DMA_cap = 2; 3271 sc->sc_wdcdev.set_modes = acer_setup_channel; 3272 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3273 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3274 3275 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 3276 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 3277 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 3278 3279 /* Enable "microsoft register bits" R/W. 
*/ 3280 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 3281 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 3282 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 3283 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 3284 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 3285 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 3286 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 3287 ~ACER_CHANSTATUSREGS_RO); 3288 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 3289 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 3290 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 3291 /* Don't use cr, re-read the real register content instead */ 3292 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 3293 PCI_CLASS_REG)); 3294 3295 /* From linux: enable "Cable Detection" */ 3296 if (rev >= 0xC2) { 3297 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 3298 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 3299 | ACER_0x4B_CDETECT); 3300 } 3301 3302 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3303 cp = &sc->pciide_channels[channel]; 3304 if (pciide_chansetup(sc, channel, interface) == 0) 3305 continue; 3306 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 3307 printf("%s: %s channel ignored (disabled)\n", 3308 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3309 continue; 3310 } 3311 /* newer controllers seems to lack the ACER_CHIDS. Sigh */ 3312 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3313 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr); 3314 if (cp->hw_ok == 0) 3315 continue; 3316 if (pciide_chan_candisable(cp)) { 3317 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 3318 pci_conf_write(sc->sc_pc, sc->sc_tag, 3319 PCI_CLASS_REG, cr); 3320 } 3321 pciide_map_compat_intr(pa, cp, channel, interface); 3322 acer_setup_channel(&cp->wdc_channel); 3323 } 3324 } 3325 3326 void 3327 acer_setup_channel(chp) 3328 struct channel_softc *chp; 3329 { 3330 struct ata_drive_datas *drvp; 3331 int drive; 3332 u_int32_t acer_fifo_udma; 3333 u_int32_t idedma_ctl; 3334 struct pciide_channel *cp = (struct pciide_channel*)chp; 3335 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3336 3337 idedma_ctl = 0; 3338 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 3339 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 3340 acer_fifo_udma), DEBUG_PROBE); 3341 /* setup DMA if needed */ 3342 pciide_channel_dma_setup(cp); 3343 3344 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 3345 DRIVE_UDMA) { /* check 80 pins cable */ 3346 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 3347 ACER_0x4A_80PIN(chp->channel)) { 3348 if (chp->ch_drive[0].UDMA_mode > 2) 3349 chp->ch_drive[0].UDMA_mode = 2; 3350 if (chp->ch_drive[1].UDMA_mode > 2) 3351 chp->ch_drive[1].UDMA_mode = 2; 3352 } 3353 } 3354 3355 for (drive = 0; drive < 2; drive++) { 3356 drvp = &chp->ch_drive[drive]; 3357 /* If no drive, skip */ 3358 if ((drvp->drive_flags & DRIVE) == 0) 3359 continue; 3360 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 3361 "channel %d drive %d 0x%x\n", chp->channel, drive, 3362 pciide_pci_read(sc->sc_pc, sc->sc_tag, 3363 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 3364 /* clear FIFO/DMA mode */ 3365 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 3366 ACER_UDMA_EN(chp->channel, drive) | 3367 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 3368 3369 /* add timing values, setup DMA if needed */ 3370 if ((drvp->drive_flags & 
DRIVE_DMA) == 0 && 3371 (drvp->drive_flags & DRIVE_UDMA) == 0) { 3372 acer_fifo_udma |= 3373 ACER_FTH_OPL(chp->channel, drive, 0x1); 3374 goto pio; 3375 } 3376 3377 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 3378 if (drvp->drive_flags & DRIVE_UDMA) { 3379 /* use Ultra/DMA */ 3380 drvp->drive_flags &= ~DRIVE_DMA; 3381 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 3382 acer_fifo_udma |= 3383 ACER_UDMA_TIM(chp->channel, drive, 3384 acer_udma[drvp->UDMA_mode]); 3385 /* XXX disable if one drive < UDMA3 ? */ 3386 if (drvp->UDMA_mode >= 3) { 3387 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3388 ACER_0x4B, 3389 pciide_pci_read(sc->sc_pc, sc->sc_tag, 3390 ACER_0x4B) | ACER_0x4B_UDMA66); 3391 } 3392 } else { 3393 /* 3394 * use Multiword DMA 3395 * Timings will be used for both PIO and DMA, 3396 * so adjust DMA mode if needed 3397 */ 3398 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3399 drvp->PIO_mode = drvp->DMA_mode + 2; 3400 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3401 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 3402 drvp->PIO_mode - 2 : 0; 3403 if (drvp->DMA_mode == 0) 3404 drvp->PIO_mode = 0; 3405 } 3406 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3407 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 3408 ACER_IDETIM(chp->channel, drive), 3409 acer_pio[drvp->PIO_mode]); 3410 } 3411 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 3412 acer_fifo_udma), DEBUG_PROBE); 3413 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 3414 if (idedma_ctl != 0) { 3415 /* Add software bits in status register */ 3416 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3417 IDEDMA_CTL, idedma_ctl); 3418 } 3419 pciide_print_modes(cp); 3420 } 3421 3422 int 3423 acer_pci_intr(arg) 3424 void *arg; 3425 { 3426 struct pciide_softc *sc = arg; 3427 struct pciide_channel *cp; 3428 struct channel_softc *wdc_cp; 3429 int i, rv, crv; 3430 u_int32_t chids; 3431 3432 rv = 0; 3433 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 3434 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3435 cp = &sc->pciide_channels[i]; 3436 wdc_cp = &cp->wdc_channel; 3437 /* If a compat channel skip. 
*/ 3438 if (cp->compat) 3439 continue; 3440 if (chids & ACER_CHIDS_INT(i)) { 3441 crv = wdcintr(wdc_cp); 3442 if (crv == 0) 3443 printf("%s:%d: bogus intr\n", 3444 sc->sc_wdcdev.sc_dev.dv_xname, i); 3445 else 3446 rv = 1; 3447 } 3448 } 3449 return rv; 3450 } 3451 3452 void 3453 hpt_chip_map(sc, pa) 3454 struct pciide_softc *sc; 3455 struct pci_attach_args *pa; 3456 { 3457 struct pciide_channel *cp; 3458 int i, compatchan, revision; 3459 pcireg_t interface; 3460 bus_size_t cmdsize, ctlsize; 3461 3462 if (pciide_chipen(sc, pa) == 0) 3463 return; 3464 revision = PCI_REVISION(pa->pa_class); 3465 printf(": Triones/Highpoint "); 3466 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 3467 printf("HPT374 IDE Controller\n"); 3468 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372) 3469 printf("HPT372 IDE Controller\n"); 3470 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 3471 if (revision == HPT372_REV) 3472 printf("HPT372 IDE Controller\n"); 3473 else if (revision == HPT370_REV) 3474 printf("HPT370 IDE Controller\n"); 3475 else if (revision == HPT370A_REV) 3476 printf("HPT370A IDE Controller\n"); 3477 else if (revision == HPT366_REV) 3478 printf("HPT366 IDE Controller\n"); 3479 else 3480 printf("unknown HPT IDE controller rev %d\n", revision); 3481 } else 3482 printf("unknown HPT IDE controller 0x%x\n", 3483 sc->sc_pp->ide_product); 3484 3485 /* 3486 * when the chip is in native mode it identifies itself as a 3487 * 'misc mass storage'. Fake interface in this case. 3488 */ 3489 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3490 interface = PCI_INTERFACE(pa->pa_class); 3491 } else { 3492 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3493 PCIIDE_INTERFACE_PCI(0); 3494 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3495 (revision == HPT370_REV || revision == HPT370A_REV || 3496 revision == HPT372_REV)) || 3497 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 || 3498 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 3499 interface |= PCIIDE_INTERFACE_PCI(1); 3500 } 3501 3502 printf("%s: bus-master DMA support present", 3503 sc->sc_wdcdev.sc_dev.dv_xname); 3504 pciide_mapreg_dma(sc, pa); 3505 printf("\n"); 3506 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3507 WDC_CAPABILITY_MODE; 3508 if (sc->sc_dma_ok) { 3509 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3510 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3511 sc->sc_wdcdev.irqack = pciide_irqack; 3512 } 3513 sc->sc_wdcdev.PIO_cap = 4; 3514 sc->sc_wdcdev.DMA_cap = 2; 3515 3516 sc->sc_wdcdev.set_modes = hpt_setup_channel; 3517 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3518 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3519 revision == HPT366_REV) { 3520 sc->sc_wdcdev.UDMA_cap = 4; 3521 /* 3522 * The 366 has 2 PCI IDE functions, one for primary and one 3523 * for secondary. 
So we need to call pciide_mapregs_compat() 3524 * with the real channel 3525 */ 3526 if (pa->pa_function == 0) { 3527 compatchan = 0; 3528 } else if (pa->pa_function == 1) { 3529 compatchan = 1; 3530 } else { 3531 printf("%s: unexpected PCI function %d\n", 3532 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 3533 return; 3534 } 3535 sc->sc_wdcdev.nchannels = 1; 3536 } else { 3537 sc->sc_wdcdev.nchannels = 2; 3538 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 || 3539 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 || 3540 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3541 revision == HPT372_REV)) 3542 sc->sc_wdcdev.UDMA_cap = 6; 3543 else 3544 sc->sc_wdcdev.UDMA_cap = 5; 3545 } 3546 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3547 cp = &sc->pciide_channels[i]; 3548 if (sc->sc_wdcdev.nchannels > 1) { 3549 compatchan = i; 3550 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 3551 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 3552 printf("%s: %s channel ignored (disabled)\n", 3553 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3554 continue; 3555 } 3556 } 3557 if (pciide_chansetup(sc, i, interface) == 0) 3558 continue; 3559 if (interface & PCIIDE_INTERFACE_PCI(i)) { 3560 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 3561 &ctlsize, hpt_pci_intr); 3562 } else { 3563 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 3564 &cmdsize, &ctlsize); 3565 } 3566 if (cp->hw_ok == 0) 3567 return; 3568 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 3569 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 3570 wdcattach(&cp->wdc_channel); 3571 hpt_setup_channel(&cp->wdc_channel); 3572 } 3573 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3574 (revision == HPT370_REV || revision == HPT370A_REV || 3575 revision == HPT372_REV)) || 3576 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 || 3577 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 3578 /* 3579 * HPT370_REV and highter has a bit to disable interrupts, 3580 * make sure to clear it 3581 */ 3582 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 3583 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 3584 ~HPT_CSEL_IRQDIS); 3585 } 3586 /* set clocks, etc (mandatory on 372/4, optional otherwise) */ 3587 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3588 revision == HPT372_REV ) || 3589 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 || 3590 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 3591 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 3592 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 3593 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 3594 return; 3595 } 3596 3597 void 3598 hpt_setup_channel(chp) 3599 struct channel_softc *chp; 3600 { 3601 struct ata_drive_datas *drvp; 3602 int drive; 3603 int cable; 3604 u_int32_t before, after; 3605 u_int32_t idedma_ctl; 3606 struct pciide_channel *cp = (struct pciide_channel*)chp; 3607 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3608 int revision = 3609 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 3610 3611 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 3612 3613 /* setup DMA if needed */ 3614 pciide_channel_dma_setup(cp); 3615 3616 idedma_ctl = 0; 3617 3618 /* Per drive settings */ 3619 for (drive = 0; drive < 2; drive++) { 3620 drvp = &chp->ch_drive[drive]; 3621 /* If no drive, skip */ 3622 if ((drvp->drive_flags & DRIVE) == 0) 3623 continue; 3624 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 3625 HPT_IDETIM(chp->channel, drive)); 3626 3627 /* add timing values, setup DMA if 
needed */ 3628 if (drvp->drive_flags & DRIVE_UDMA) { 3629 /* use Ultra/DMA */ 3630 drvp->drive_flags &= ~DRIVE_DMA; 3631 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 3632 drvp->UDMA_mode > 2) 3633 drvp->UDMA_mode = 2; 3634 switch (sc->sc_pp->ide_product) { 3635 case PCI_PRODUCT_TRIONES_HPT374: 3636 after = hpt374_udma[drvp->UDMA_mode]; 3637 break; 3638 case PCI_PRODUCT_TRIONES_HPT372: 3639 after = hpt372_udma[drvp->UDMA_mode]; 3640 break; 3641 case PCI_PRODUCT_TRIONES_HPT366: 3642 default: 3643 switch(revision) { 3644 case HPT372_REV: 3645 after = hpt372_udma[drvp->UDMA_mode]; 3646 break; 3647 case HPT370_REV: 3648 case HPT370A_REV: 3649 after = hpt370_udma[drvp->UDMA_mode]; 3650 break; 3651 case HPT366_REV: 3652 default: 3653 after = hpt366_udma[drvp->UDMA_mode]; 3654 break; 3655 } 3656 } 3657 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3658 } else if (drvp->drive_flags & DRIVE_DMA) { 3659 /* 3660 * use Multiword DMA. 3661 * Timings will be used for both PIO and DMA, so adjust 3662 * DMA mode if needed 3663 */ 3664 if (drvp->PIO_mode >= 3 && 3665 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3666 drvp->DMA_mode = drvp->PIO_mode - 2; 3667 } 3668 switch (sc->sc_pp->ide_product) { 3669 case PCI_PRODUCT_TRIONES_HPT374: 3670 after = hpt374_dma[drvp->DMA_mode]; 3671 break; 3672 case PCI_PRODUCT_TRIONES_HPT372: 3673 after = hpt372_dma[drvp->DMA_mode]; 3674 break; 3675 case PCI_PRODUCT_TRIONES_HPT366: 3676 default: 3677 switch(revision) { 3678 case HPT372_REV: 3679 after = hpt372_dma[drvp->DMA_mode]; 3680 break; 3681 case HPT370_REV: 3682 case HPT370A_REV: 3683 after = hpt370_dma[drvp->DMA_mode]; 3684 break; 3685 case HPT366_REV: 3686 default: 3687 after = hpt366_dma[drvp->DMA_mode]; 3688 break; 3689 } 3690 } 3691 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3692 } else { 3693 /* PIO only */ 3694 switch (sc->sc_pp->ide_product) { 3695 case PCI_PRODUCT_TRIONES_HPT374: 3696 after = hpt374_pio[drvp->PIO_mode]; 3697 break; 3698 case PCI_PRODUCT_TRIONES_HPT372: 3699 after = hpt372_pio[drvp->PIO_mode]; 3700 break; 3701 case PCI_PRODUCT_TRIONES_HPT366: 3702 default: 3703 switch(revision) { 3704 case HPT372_REV: 3705 after = hpt372_pio[drvp->PIO_mode]; 3706 break; 3707 case HPT370_REV: 3708 case HPT370A_REV: 3709 after = hpt370_pio[drvp->PIO_mode]; 3710 break; 3711 case HPT366_REV: 3712 default: 3713 after = hpt366_pio[drvp->PIO_mode]; 3714 break; 3715 } 3716 } 3717 } 3718 pci_conf_write(sc->sc_pc, sc->sc_tag, 3719 HPT_IDETIM(chp->channel, drive), after); 3720 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 3721 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname, 3722 after, before), DEBUG_PROBE); 3723 } 3724 if (idedma_ctl != 0) { 3725 /* Add software bits in status register */ 3726 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3727 IDEDMA_CTL, idedma_ctl); 3728 } 3729 pciide_print_modes(cp); 3730 } 3731 3732 int 3733 hpt_pci_intr(arg) 3734 void *arg; 3735 { 3736 struct pciide_softc *sc = arg; 3737 struct pciide_channel *cp; 3738 struct channel_softc *wdc_cp; 3739 int rv = 0; 3740 int dmastat, i, crv; 3741 3742 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3743 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3744 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 3745 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 3746 IDEDMA_CTL_INTR) 3747 continue; 3748 cp = &sc->pciide_channels[i]; 3749 wdc_cp = &cp->wdc_channel; 3750 crv = wdcintr(wdc_cp); 3751 if (crv == 0) { 3752 printf("%s:%d: bogus intr\n", 3753 sc->sc_wdcdev.sc_dev.dv_xname, i); 3754 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3755 
IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 3756 } else 3757 rv = 1; 3758 } 3759 return rv; 3760 } 3761 3762 3763 /* Macros to test product */ 3764 #define PDC_IS_262(sc) \ 3765 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \ 3766 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3767 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \ 3768 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \ 3769 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \ 3770 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \ 3771 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \ 3772 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2) 3773 #define PDC_IS_265(sc) \ 3774 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3775 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \ 3776 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \ 3777 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \ 3778 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \ 3779 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \ 3780 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2) 3781 #define PDC_IS_268(sc) \ 3782 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \ 3783 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \ 3784 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \ 3785 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \ 3786 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2) 3787 #define PDC_IS_276(sc) \ 3788 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \ 3789 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \ 3790 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2) 3791 3792 void 3793 pdc202xx_chip_map(sc, pa) 3794 struct pciide_softc *sc; 3795 struct pci_attach_args *pa; 3796 { 3797 struct pciide_channel *cp; 3798 int channel; 3799 pcireg_t interface, st, mode; 3800 bus_size_t cmdsize, ctlsize; 3801 3802 if (!PDC_IS_268(sc)) { 3803 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3804 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 3805 st), DEBUG_PROBE); 3806 } 3807 if (pciide_chipen(sc, pa) == 0) 3808 return; 3809 3810 /* turn off RAID mode */ 3811 if (!PDC_IS_268(sc)) 3812 st &= ~PDC2xx_STATE_IDERAID; 3813 3814 /* 3815 * can't rely on the PCI_CLASS_REG content if the chip was in raid 3816 * mode. We have to fake interface 3817 */ 3818 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 3819 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 3820 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3821 3822 printf("%s: bus-master DMA support present", 3823 sc->sc_wdcdev.sc_dev.dv_xname); 3824 pciide_mapreg_dma(sc, pa); 3825 printf("\n"); 3826 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3827 WDC_CAPABILITY_MODE; 3828 if (sc->sc_dma_ok) { 3829 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3830 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3831 sc->sc_wdcdev.irqack = pciide_irqack; 3832 } 3833 sc->sc_wdcdev.PIO_cap = 4; 3834 sc->sc_wdcdev.DMA_cap = 2; 3835 if (PDC_IS_276(sc)) 3836 sc->sc_wdcdev.UDMA_cap = 6; 3837 else if (PDC_IS_265(sc)) 3838 sc->sc_wdcdev.UDMA_cap = 5; 3839 else if (PDC_IS_262(sc)) 3840 sc->sc_wdcdev.UDMA_cap = 4; 3841 else 3842 sc->sc_wdcdev.UDMA_cap = 2; 3843 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
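	    /*
	     * The 20268 and newer parts snoop the SET FEATURES command for
	     * the transfer mode, so they use the simpler setup routine
	     * (see pdc20268_setup_channel below).
	     */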
3844 pdc20268_setup_channel : pdc202xx_setup_channel; 3845 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3846 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3847 3848 if (!PDC_IS_268(sc)) { 3849 /* setup failsafe defaults */ 3850 mode = 0; 3851 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 3852 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 3853 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 3854 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 3855 for (channel = 0; 3856 channel < sc->sc_wdcdev.nchannels; 3857 channel++) { 3858 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 3859 "drive 0 initial timings 0x%x, now 0x%x\n", 3860 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 3861 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 3862 DEBUG_PROBE); 3863 pci_conf_write(sc->sc_pc, sc->sc_tag, 3864 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 3865 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 3866 "drive 1 initial timings 0x%x, now 0x%x\n", 3867 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 3868 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 3869 pci_conf_write(sc->sc_pc, sc->sc_tag, 3870 PDC2xx_TIM(channel, 1), mode); 3871 } 3872 3873 mode = PDC2xx_SCR_DMA; 3874 if (PDC_IS_262(sc)) { 3875 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 3876 } else { 3877 /* the BIOS set it up this way */ 3878 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 3879 } 3880 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 3881 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 3882 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 3883 "now 0x%x\n", 3884 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3885 PDC2xx_SCR), 3886 mode), DEBUG_PROBE); 3887 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3888 PDC2xx_SCR, mode); 3889 3890 /* controller initial state register is OK even without BIOS */ 3891 /* Set DMA mode to IDE DMA compatibility */ 3892 mode = 3893 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 3894 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 3895 DEBUG_PROBE); 3896 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 3897 mode | 0x1); 3898 mode = 3899 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 3900 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 3901 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 3902 mode | 0x1); 3903 } 3904 3905 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3906 cp = &sc->pciide_channels[channel]; 3907 if (pciide_chansetup(sc, channel, interface) == 0) 3908 continue; 3909 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 3910 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 3911 printf("%s: %s channel ignored (disabled)\n", 3912 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3913 continue; 3914 } 3915 if (PDC_IS_265(sc)) 3916 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3917 pdc20265_pci_intr); 3918 else 3919 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3920 pdc202xx_pci_intr); 3921 if (cp->hw_ok == 0) 3922 continue; 3923 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) 3924 st &= ~(PDC_IS_262(sc) ? 
3925 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 3926 pciide_map_compat_intr(pa, cp, channel, interface); 3927 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 3928 } 3929 if (!PDC_IS_268(sc)) { 3930 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 3931 "0x%x\n", st), DEBUG_PROBE); 3932 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 3933 } 3934 return; 3935 } 3936 3937 void 3938 pdc202xx_setup_channel(chp) 3939 struct channel_softc *chp; 3940 { 3941 struct ata_drive_datas *drvp; 3942 int drive; 3943 pcireg_t mode, st; 3944 u_int32_t idedma_ctl, scr, atapi; 3945 struct pciide_channel *cp = (struct pciide_channel*)chp; 3946 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3947 int channel = chp->channel; 3948 3949 /* setup DMA if needed */ 3950 pciide_channel_dma_setup(cp); 3951 3952 idedma_ctl = 0; 3953 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 3954 sc->sc_wdcdev.sc_dev.dv_xname, 3955 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 3956 DEBUG_PROBE); 3957 3958 /* Per channel settings */ 3959 if (PDC_IS_262(sc)) { 3960 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3961 PDC262_U66); 3962 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3963 /* Trim UDMA mode */ 3964 if ((st & PDC262_STATE_80P(channel)) != 0 || 3965 (chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3966 chp->ch_drive[0].UDMA_mode <= 2) || 3967 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3968 chp->ch_drive[1].UDMA_mode <= 2)) { 3969 if (chp->ch_drive[0].UDMA_mode > 2) 3970 chp->ch_drive[0].UDMA_mode = 2; 3971 if (chp->ch_drive[1].UDMA_mode > 2) 3972 chp->ch_drive[1].UDMA_mode = 2; 3973 } 3974 /* Set U66 if needed */ 3975 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3976 chp->ch_drive[0].UDMA_mode > 2) || 3977 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3978 chp->ch_drive[1].UDMA_mode > 2)) 3979 scr |= PDC262_U66_EN(channel); 3980 else 3981 scr &= ~PDC262_U66_EN(channel); 3982 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3983 PDC262_U66, scr); 3984 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 3985 sc->sc_wdcdev.sc_dev.dv_xname, channel, 3986 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3987 PDC262_ATAPI(channel))), DEBUG_PROBE); 3988 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 3989 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 3990 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3991 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3992 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 3993 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3994 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3995 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 3996 atapi = 0; 3997 else 3998 atapi = PDC262_ATAPI_UDMA; 3999 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 4000 PDC262_ATAPI(channel), atapi); 4001 } 4002 } 4003 for (drive = 0; drive < 2; drive++) { 4004 drvp = &chp->ch_drive[drive]; 4005 /* If no drive, skip */ 4006 if ((drvp->drive_flags & DRIVE) == 0) 4007 continue; 4008 mode = 0; 4009 if (drvp->drive_flags & DRIVE_UDMA) { 4010 /* use Ultra/DMA */ 4011 drvp->drive_flags &= ~DRIVE_DMA; 4012 mode = PDC2xx_TIM_SET_MB(mode, 4013 pdc2xx_udma_mb[drvp->UDMA_mode]); 4014 mode = PDC2xx_TIM_SET_MC(mode, 4015 pdc2xx_udma_mc[drvp->UDMA_mode]); 4016 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4017 } else if (drvp->drive_flags & DRIVE_DMA) { 4018 mode = PDC2xx_TIM_SET_MB(mode, 4019 pdc2xx_dma_mb[drvp->DMA_mode]); 4020 mode = PDC2xx_TIM_SET_MC(mode, 4021 pdc2xx_dma_mc[drvp->DMA_mode]); 4022 idedma_ctl |= 
IDEDMA_CTL_DRV_DMA(drive);
		} else {
			mode = PDC2xx_TIM_SET_MB(mode,
			    pdc2xx_dma_mb[0]);
			mode = PDC2xx_TIM_SET_MC(mode,
			    pdc2xx_dma_mc[0]);
		}
		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
		if (drvp->drive_flags & DRIVE_ATA)
			mode |= PDC2xx_TIM_PRE;
		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
		if (drvp->PIO_mode >= 3) {
			mode |= PDC2xx_TIM_IORDY;
			if (drive == 0)
				mode |= PDC2xx_TIM_IORDYp;
		}
		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
		    "timings 0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    chp->channel, drive, mode), DEBUG_PROBE);
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PDC2xx_TIM(chp->channel, drive), mode);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}

void
pdc20268_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int u100;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;

	/* I don't know what this is for; FreeBSD does it ... */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + 0x1, 0x0b);

	/*
	 * I don't know what this is for; FreeBSD checks it ... it is not
	 * cable type detection.
	 */
	u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
			if (drvp->UDMA_mode > 2 && u100 == 0)
				drvp->UDMA_mode = 2;
		} else if (drvp->drive_flags & DRIVE_DMA) {
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
	}
	/*
	 * Nothing to do to set up the modes; the controller snoops the
	 * SET_FEATURES command.
	 */
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}

int
pdc202xx_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;
	u_int32_t scr;

	rv = 0;
	scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
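		/*
		 * Compat channels get their interrupts through the compat
		 * interrupt mapping set up by pciide_map_compat_intr(), not
		 * through the PCI interrupt, so they are not serviced here.
		 */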
		if (cp->compat)
			continue;
		if (scr & PDC2xx_SCR_INT(i)) {
			crv = wdcintr(wdc_cp);
			if (crv == 0)
				printf("%s:%d: bogus intr (reg 0x%x)\n",
				    sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
			else
				rv = 1;
		}
	}
	return rv;
}

int
pdc20265_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;
	u_int32_t dmastat;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
		if (cp->compat)
			continue;
		/*
		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
		 * so use that instead (it takes two register reads instead of
		 * one, but there is no other way to do it).
		 */
		dmastat = bus_space_read_1(sc->sc_dma_iot,
		    sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
		if ((dmastat & IDEDMA_CTL_INTR) == 0)
			continue;
		crv = wdcintr(wdc_cp);
		if (crv == 0)
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
		else
			rv = 1;
	}
	return rv;
}

void
opti_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	u_int8_t init_ctrl;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);

	/*
	 * XXXSCW:
	 * There seem to be a couple of buggy revisions/implementations
	 * of the OPTi pciide chipset. This kludge seems to fix one of
	 * the reported problems (PR/11644) but still fails for the
	 * other (PR/13151), although the latter may be due to other
	 * issues too...
	 */
	if (PCI_REVISION(pa->pa_class) <= 0x12) {
		printf(" but disabled due to chip rev.
<= 0x12"); 4198 sc->sc_dma_ok = 0; 4199 } else 4200 pciide_mapreg_dma(sc, pa); 4201 4202 printf("\n"); 4203 4204 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 | 4205 WDC_CAPABILITY_MODE; 4206 sc->sc_wdcdev.PIO_cap = 4; 4207 if (sc->sc_dma_ok) { 4208 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4209 sc->sc_wdcdev.irqack = pciide_irqack; 4210 sc->sc_wdcdev.DMA_cap = 2; 4211 } 4212 sc->sc_wdcdev.set_modes = opti_setup_channel; 4213 4214 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4215 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4216 4217 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 4218 OPTI_REG_INIT_CONTROL); 4219 4220 interface = PCI_INTERFACE(pa->pa_class); 4221 4222 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4223 cp = &sc->pciide_channels[channel]; 4224 if (pciide_chansetup(sc, channel, interface) == 0) 4225 continue; 4226 if (channel == 1 && 4227 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 4228 printf("%s: %s channel ignored (disabled)\n", 4229 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4230 continue; 4231 } 4232 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4233 pciide_pci_intr); 4234 if (cp->hw_ok == 0) 4235 continue; 4236 pciide_map_compat_intr(pa, cp, channel, interface); 4237 if (cp->hw_ok == 0) 4238 continue; 4239 opti_setup_channel(&cp->wdc_channel); 4240 } 4241 } 4242 4243 void 4244 opti_setup_channel(chp) 4245 struct channel_softc *chp; 4246 { 4247 struct ata_drive_datas *drvp; 4248 struct pciide_channel *cp = (struct pciide_channel*)chp; 4249 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4250 int drive, spd; 4251 int mode[2]; 4252 u_int8_t rv, mr; 4253 4254 /* 4255 * The `Delay' and `Address Setup Time' fields of the 4256 * Miscellaneous Register are always zero initially. 4257 */ 4258 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 4259 mr &= ~(OPTI_MISC_DELAY_MASK | 4260 OPTI_MISC_ADDR_SETUP_MASK | 4261 OPTI_MISC_INDEX_MASK); 4262 4263 /* Prime the control register before setting timing values */ 4264 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 4265 4266 /* Determine the clockrate of the PCIbus the chip is attached to */ 4267 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 4268 spd &= OPTI_STRAP_PCI_SPEED_MASK; 4269 4270 /* setup DMA if needed */ 4271 pciide_channel_dma_setup(cp); 4272 4273 for (drive = 0; drive < 2; drive++) { 4274 drvp = &chp->ch_drive[drive]; 4275 /* If no drive, skip */ 4276 if ((drvp->drive_flags & DRIVE) == 0) { 4277 mode[drive] = -1; 4278 continue; 4279 } 4280 4281 if ((drvp->drive_flags & DRIVE_DMA)) { 4282 /* 4283 * Timings will be used for both PIO and DMA, 4284 * so adjust DMA mode if needed 4285 */ 4286 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 4287 drvp->PIO_mode = drvp->DMA_mode + 2; 4288 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 4289 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 4290 drvp->PIO_mode - 2 : 0; 4291 if (drvp->DMA_mode == 0) 4292 drvp->PIO_mode = 0; 4293 4294 mode[drive] = drvp->DMA_mode + 5; 4295 } else 4296 mode[drive] = drvp->PIO_mode; 4297 4298 if (drive && mode[0] >= 0 && 4299 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 4300 /* 4301 * Can't have two drives using different values 4302 * for `Address Setup Time'. 4303 * Slow down the faster drive to compensate. 4304 */ 4305 int d = (opti_tim_as[spd][mode[0]] > 4306 opti_tim_as[spd][mode[1]]) ? 
0 : 1; 4307 4308 mode[d] = mode[1-d]; 4309 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 4310 chp->ch_drive[d].DMA_mode = 0; 4311 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA; 4312 } 4313 } 4314 4315 for (drive = 0; drive < 2; drive++) { 4316 int m; 4317 if ((m = mode[drive]) < 0) 4318 continue; 4319 4320 /* Set the Address Setup Time and select appropriate index */ 4321 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 4322 rv |= OPTI_MISC_INDEX(drive); 4323 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 4324 4325 /* Set the pulse width and recovery timing parameters */ 4326 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 4327 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 4328 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 4329 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 4330 4331 /* Set the Enhanced Mode register appropriately */ 4332 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 4333 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 4334 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 4335 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 4336 } 4337 4338 /* Finally, enable the timings */ 4339 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 4340 4341 pciide_print_modes(cp); 4342 } 4343 4344 #define ACARD_IS_850(sc) \ 4345 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 4346 4347 void 4348 acard_chip_map(sc, pa) 4349 struct pciide_softc *sc; 4350 struct pci_attach_args *pa; 4351 { 4352 struct pciide_channel *cp; 4353 int i; 4354 pcireg_t interface; 4355 bus_size_t cmdsize, ctlsize; 4356 4357 if (pciide_chipen(sc, pa) == 0) 4358 return; 4359 4360 /* 4361 * when the chip is in native mode it identifies itself as a 4362 * 'misc mass storage'. Fake interface in this case. 4363 */ 4364 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4365 interface = PCI_INTERFACE(pa->pa_class); 4366 } else { 4367 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4368 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4369 } 4370 4371 printf("%s: bus-master DMA support present", 4372 sc->sc_wdcdev.sc_dev.dv_xname); 4373 pciide_mapreg_dma(sc, pa); 4374 printf("\n"); 4375 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4376 WDC_CAPABILITY_MODE; 4377 4378 if (sc->sc_dma_ok) { 4379 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4380 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4381 sc->sc_wdcdev.irqack = pciide_irqack; 4382 } 4383 sc->sc_wdcdev.PIO_cap = 4; 4384 sc->sc_wdcdev.DMA_cap = 2; 4385 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 
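	    /* the ATP850 handles UDMA up to mode 2, the ATP860 family up to mode 4 */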
2 : 4; 4386 4387 sc->sc_wdcdev.set_modes = acard_setup_channel; 4388 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4389 sc->sc_wdcdev.nchannels = 2; 4390 4391 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4392 cp = &sc->pciide_channels[i]; 4393 if (pciide_chansetup(sc, i, interface) == 0) 4394 continue; 4395 if (interface & PCIIDE_INTERFACE_PCI(i)) { 4396 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 4397 &ctlsize, pciide_pci_intr); 4398 } else { 4399 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 4400 &cmdsize, &ctlsize); 4401 } 4402 if (cp->hw_ok == 0) 4403 return; 4404 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4405 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4406 wdcattach(&cp->wdc_channel); 4407 acard_setup_channel(&cp->wdc_channel); 4408 } 4409 if (!ACARD_IS_850(sc)) { 4410 u_int32_t reg; 4411 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 4412 reg &= ~ATP860_CTRL_INT; 4413 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 4414 } 4415 } 4416 4417 void 4418 acard_setup_channel(chp) 4419 struct channel_softc *chp; 4420 { 4421 struct ata_drive_datas *drvp; 4422 struct pciide_channel *cp = (struct pciide_channel*)chp; 4423 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4424 int channel = chp->channel; 4425 int drive; 4426 u_int32_t idetime, udma_mode; 4427 u_int32_t idedma_ctl; 4428 4429 /* setup DMA if needed */ 4430 pciide_channel_dma_setup(cp); 4431 4432 if (ACARD_IS_850(sc)) { 4433 idetime = 0; 4434 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 4435 udma_mode &= ~ATP850_UDMA_MASK(channel); 4436 } else { 4437 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 4438 idetime &= ~ATP860_SETTIME_MASK(channel); 4439 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 4440 udma_mode &= ~ATP860_UDMA_MASK(channel); 4441 4442 /* check 80 pins cable */ 4443 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) || 4444 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 4445 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 4446 & ATP860_CTRL_80P(chp->channel)) { 4447 if (chp->ch_drive[0].UDMA_mode > 2) 4448 chp->ch_drive[0].UDMA_mode = 2; 4449 if (chp->ch_drive[1].UDMA_mode > 2) 4450 chp->ch_drive[1].UDMA_mode = 2; 4451 } 4452 } 4453 } 4454 4455 idedma_ctl = 0; 4456 4457 /* Per drive settings */ 4458 for (drive = 0; drive < 2; drive++) { 4459 drvp = &chp->ch_drive[drive]; 4460 /* If no drive, skip */ 4461 if ((drvp->drive_flags & DRIVE) == 0) 4462 continue; 4463 /* add timing values, setup DMA if needed */ 4464 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 4465 (drvp->drive_flags & DRIVE_UDMA)) { 4466 /* use Ultra/DMA */ 4467 if (ACARD_IS_850(sc)) { 4468 idetime |= ATP850_SETTIME(drive, 4469 acard_act_udma[drvp->UDMA_mode], 4470 acard_rec_udma[drvp->UDMA_mode]); 4471 udma_mode |= ATP850_UDMA_MODE(channel, drive, 4472 acard_udma_conf[drvp->UDMA_mode]); 4473 } else { 4474 idetime |= ATP860_SETTIME(channel, drive, 4475 acard_act_udma[drvp->UDMA_mode], 4476 acard_rec_udma[drvp->UDMA_mode]); 4477 udma_mode |= ATP860_UDMA_MODE(channel, drive, 4478 acard_udma_conf[drvp->UDMA_mode]); 4479 } 4480 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4481 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 4482 (drvp->drive_flags & DRIVE_DMA)) { 4483 /* use Multiword DMA */ 4484 drvp->drive_flags &= ~DRIVE_UDMA; 4485 if (ACARD_IS_850(sc)) { 4486 idetime |= ATP850_SETTIME(drive, 4487 acard_act_dma[drvp->DMA_mode], 4488 acard_rec_dma[drvp->DMA_mode]); 4489 } else { 4490 idetime |= ATP860_SETTIME(channel, drive, 4491 
acard_act_dma[drvp->DMA_mode], 4492 acard_rec_dma[drvp->DMA_mode]); 4493 } 4494 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4495 } else { 4496 /* PIO only */ 4497 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 4498 if (ACARD_IS_850(sc)) { 4499 idetime |= ATP850_SETTIME(drive, 4500 acard_act_pio[drvp->PIO_mode], 4501 acard_rec_pio[drvp->PIO_mode]); 4502 } else { 4503 idetime |= ATP860_SETTIME(channel, drive, 4504 acard_act_pio[drvp->PIO_mode], 4505 acard_rec_pio[drvp->PIO_mode]); 4506 } 4507 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 4508 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 4509 | ATP8x0_CTRL_EN(channel)); 4510 } 4511 } 4512 4513 if (idedma_ctl != 0) { 4514 /* Add software bits in status register */ 4515 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4516 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl); 4517 } 4518 pciide_print_modes(cp); 4519 4520 if (ACARD_IS_850(sc)) { 4521 pci_conf_write(sc->sc_pc, sc->sc_tag, 4522 ATP850_IDETIME(channel), idetime); 4523 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 4524 } else { 4525 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 4526 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 4527 } 4528 } 4529 4530 int 4531 acard_pci_intr(arg) 4532 void *arg; 4533 { 4534 struct pciide_softc *sc = arg; 4535 struct pciide_channel *cp; 4536 struct channel_softc *wdc_cp; 4537 int rv = 0; 4538 int dmastat, i, crv; 4539 4540 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4541 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4542 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 4543 if ((dmastat & IDEDMA_CTL_INTR) == 0) 4544 continue; 4545 cp = &sc->pciide_channels[i]; 4546 wdc_cp = &cp->wdc_channel; 4547 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) { 4548 (void)wdcintr(wdc_cp); 4549 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4550 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 4551 continue; 4552 } 4553 crv = wdcintr(wdc_cp); 4554 if (crv == 0) 4555 printf("%s:%d: bogus intr\n", 4556 sc->sc_wdcdev.sc_dev.dv_xname, i); 4557 else if (crv == 1) 4558 rv = 1; 4559 else if (rv == 0) 4560 rv = crv; 4561 } 4562 return rv; 4563 } 4564 4565 static int 4566 sl82c105_bugchk(struct pci_attach_args *pa) 4567 { 4568 4569 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND || 4570 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0) 4571 return (0); 4572 4573 if (PCI_REVISION(pa->pa_class) <= 0x05) 4574 return (1); 4575 4576 return (0); 4577 } 4578 4579 void 4580 sl82c105_chip_map(sc, pa) 4581 struct pciide_softc *sc; 4582 struct pci_attach_args *pa; 4583 { 4584 struct pciide_channel *cp; 4585 bus_size_t cmdsize, ctlsize; 4586 pcireg_t interface, idecr; 4587 int channel; 4588 4589 if (pciide_chipen(sc, pa) == 0) 4590 return; 4591 4592 printf("%s: bus-master DMA support present", 4593 sc->sc_wdcdev.sc_dev.dv_xname); 4594 4595 /* 4596 * Check to see if we're part of the Winbond 83c553 Southbridge. 4597 * If so, we need to disable DMA on rev. <= 5 of that chip. 4598 */ 4599 if (pci_find_device(pa, sl82c105_bugchk)) { 4600 printf(" but disabled due to 83c553 rev. 
<= 0x05"); 4601 sc->sc_dma_ok = 0; 4602 } else 4603 pciide_mapreg_dma(sc, pa); 4604 printf("\n"); 4605 4606 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 | 4607 WDC_CAPABILITY_MODE; 4608 sc->sc_wdcdev.PIO_cap = 4; 4609 if (sc->sc_dma_ok) { 4610 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4611 sc->sc_wdcdev.irqack = pciide_irqack; 4612 sc->sc_wdcdev.DMA_cap = 2; 4613 } 4614 sc->sc_wdcdev.set_modes = sl82c105_setup_channel; 4615 4616 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4617 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4618 4619 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR); 4620 4621 interface = PCI_INTERFACE(pa->pa_class); 4622 4623 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4624 cp = &sc->pciide_channels[channel]; 4625 if (pciide_chansetup(sc, channel, interface) == 0) 4626 continue; 4627 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) || 4628 (channel == 1 && (idecr & IDECR_P1EN) == 0)) { 4629 printf("%s: %s channel ignored (disabled)\n", 4630 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4631 continue; 4632 } 4633 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4634 pciide_pci_intr); 4635 if (cp->hw_ok == 0) 4636 continue; 4637 pciide_map_compat_intr(pa, cp, channel, interface); 4638 if (cp->hw_ok == 0) 4639 continue; 4640 sl82c105_setup_channel(&cp->wdc_channel); 4641 } 4642 } 4643 4644 void 4645 sl82c105_setup_channel(chp) 4646 struct channel_softc *chp; 4647 { 4648 struct ata_drive_datas *drvp; 4649 struct pciide_channel *cp = (struct pciide_channel*)chp; 4650 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4651 int pxdx_reg, drive; 4652 pcireg_t pxdx; 4653 4654 /* Set up DMA if needed. */ 4655 pciide_channel_dma_setup(cp); 4656 4657 for (drive = 0; drive < 2; drive++) { 4658 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR 4659 : SYMPH_P1D0CR) + (drive * 4); 4660 4661 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg); 4662 4663 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK); 4664 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN); 4665 4666 drvp = &chp->ch_drive[drive]; 4667 /* If no drive, skip. */ 4668 if ((drvp->drive_flags & DRIVE) == 0) { 4669 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx); 4670 continue; 4671 } 4672 4673 if (drvp->drive_flags & DRIVE_DMA) { 4674 /* 4675 * Timings will be used for both PIO and DMA, 4676 * so adjust DMA mode if needed. 4677 */ 4678 if (drvp->PIO_mode >= 3) { 4679 if ((drvp->DMA_mode + 2) > drvp->PIO_mode) 4680 drvp->DMA_mode = drvp->PIO_mode - 2; 4681 if (drvp->DMA_mode < 1) { 4682 /* 4683 * Can't mix both PIO and DMA. 4684 * Disable DMA. 4685 */ 4686 drvp->drive_flags &= ~DRIVE_DMA; 4687 } 4688 } else { 4689 /* 4690 * Can't mix both PIO and DMA. Disable 4691 * DMA. 4692 */ 4693 drvp->drive_flags &= ~DRIVE_DMA; 4694 } 4695 } 4696 4697 if (drvp->drive_flags & DRIVE_DMA) { 4698 /* Use multi-word DMA. */ 4699 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on << 4700 PxDx_CMD_ON_SHIFT; 4701 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off; 4702 } else { 4703 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on << 4704 PxDx_CMD_ON_SHIFT; 4705 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off; 4706 } 4707 4708 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */ 4709 4710 /* ...and set the mode for this drive. 
*/ 4711 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx); 4712 } 4713 4714 pciide_print_modes(cp); 4715 } 4716 4717 void 4718 serverworks_chip_map(sc, pa) 4719 struct pciide_softc *sc; 4720 struct pci_attach_args *pa; 4721 { 4722 struct pciide_channel *cp; 4723 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4724 pcitag_t pcib_tag; 4725 int channel; 4726 bus_size_t cmdsize, ctlsize; 4727 4728 if (pciide_chipen(sc, pa) == 0) 4729 return; 4730 4731 printf("%s: bus-master DMA support present", 4732 sc->sc_wdcdev.sc_dev.dv_xname); 4733 pciide_mapreg_dma(sc, pa); 4734 printf("\n"); 4735 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4736 WDC_CAPABILITY_MODE; 4737 4738 if (sc->sc_dma_ok) { 4739 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4740 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4741 sc->sc_wdcdev.irqack = pciide_irqack; 4742 } 4743 sc->sc_wdcdev.PIO_cap = 4; 4744 sc->sc_wdcdev.DMA_cap = 2; 4745 switch (sc->sc_pp->ide_product) { 4746 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE: 4747 sc->sc_wdcdev.UDMA_cap = 2; 4748 break; 4749 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE: 4750 if (PCI_REVISION(pa->pa_class) < 0x92) 4751 sc->sc_wdcdev.UDMA_cap = 4; 4752 else 4753 sc->sc_wdcdev.UDMA_cap = 5; 4754 break; 4755 } 4756 4757 sc->sc_wdcdev.set_modes = serverworks_setup_channel; 4758 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4759 sc->sc_wdcdev.nchannels = 2; 4760 4761 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4762 cp = &sc->pciide_channels[channel]; 4763 if (pciide_chansetup(sc, channel, interface) == 0) 4764 continue; 4765 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4766 serverworks_pci_intr); 4767 if (cp->hw_ok == 0) 4768 return; 4769 pciide_map_compat_intr(pa, cp, channel, interface); 4770 if (cp->hw_ok == 0) 4771 return; 4772 serverworks_setup_channel(&cp->wdc_channel); 4773 } 4774 4775 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 4776 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 4777 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 4778 } 4779 4780 void 4781 serverworks_setup_channel(chp) 4782 struct channel_softc *chp; 4783 { 4784 struct ata_drive_datas *drvp; 4785 struct pciide_channel *cp = (struct pciide_channel*)chp; 4786 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4787 int channel = chp->channel; 4788 int drive, unit; 4789 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 4790 u_int32_t idedma_ctl; 4791 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 4792 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 4793 4794 /* setup DMA if needed */ 4795 pciide_channel_dma_setup(cp); 4796 4797 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 4798 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 4799 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 4800 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 4801 4802 pio_time &= ~(0xffff << (16 * channel)); 4803 dma_time &= ~(0xffff << (16 * channel)); 4804 pio_mode &= ~(0xff << (8 * channel + 16)); 4805 udma_mode &= ~(0xff << (8 * channel + 16)); 4806 udma_mode &= ~(3 << (2 * channel)); 4807 4808 idedma_ctl = 0; 4809 4810 /* Per drive settings */ 4811 for (drive = 0; drive < 2; drive++) { 4812 drvp = &chp->ch_drive[drive]; 4813 /* If no drive, skip */ 4814 if ((drvp->drive_flags & DRIVE) == 0) 4815 continue; 4816 unit = drive + 2 * channel; 4817 /* add timing values, setup DMA if needed */ 4818 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 4819 pio_mode |= 
drvp->PIO_mode << (4 * unit + 16); 4820 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 4821 (drvp->drive_flags & DRIVE_UDMA)) { 4822 /* use Ultra/DMA, check for 80-pin cable */ 4823 if (drvp->UDMA_mode > 2 && 4824 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0) 4825 drvp->UDMA_mode = 2; 4826 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 4827 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 4828 udma_mode |= 1 << unit; 4829 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4830 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 4831 (drvp->drive_flags & DRIVE_DMA)) { 4832 /* use Multiword DMA */ 4833 drvp->drive_flags &= ~DRIVE_UDMA; 4834 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 4835 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4836 } else { 4837 /* PIO only */ 4838 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 4839 } 4840 } 4841 4842 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 4843 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 4844 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE) 4845 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 4846 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 4847 4848 if (idedma_ctl != 0) { 4849 /* Add software bits in status register */ 4850 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4851 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl); 4852 } 4853 pciide_print_modes(cp); 4854 } 4855 4856 int 4857 serverworks_pci_intr(arg) 4858 void *arg; 4859 { 4860 struct pciide_softc *sc = arg; 4861 struct pciide_channel *cp; 4862 struct channel_softc *wdc_cp; 4863 int rv = 0; 4864 int dmastat, i, crv; 4865 4866 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4867 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4868 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 4869 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 4870 IDEDMA_CTL_INTR) 4871 continue; 4872 cp = &sc->pciide_channels[i]; 4873 wdc_cp = &cp->wdc_channel; 4874 crv = wdcintr(wdc_cp); 4875 if (crv == 0) { 4876 printf("%s:%d: bogus intr\n", 4877 sc->sc_wdcdev.sc_dev.dv_xname, i); 4878 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4879 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 4880 } else 4881 rv = 1; 4882 } 4883 return rv; 4884 } 4885