1 /* $NetBSD: pciide.c,v 1.174 2002/10/05 17:07:32 kent Exp $ */ 2 3 4 /* 5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Manuel Bouyer. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 */ 34 35 36 /* 37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed by Christopher G. Demetriou 50 * for the NetBSD Project. 51 * 4. The name of the author may not be used to endorse or promote products 52 * derived from this software without specific prior written permission 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
64 */ 65 66 /* 67 * PCI IDE controller driver. 68 * 69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 70 * sys/dev/pci/ppb.c, revision 1.16). 71 * 72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 74 * 5/16/94" from the PCI SIG. 75 * 76 */ 77 78 #include <sys/cdefs.h> 79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.174 2002/10/05 17:07:32 kent Exp $"); 80 81 #ifndef WDCDEBUG 82 #define WDCDEBUG 83 #endif 84 85 #define DEBUG_DMA 0x01 86 #define DEBUG_XFERS 0x02 87 #define DEBUG_FUNCS 0x08 88 #define DEBUG_PROBE 0x10 89 #ifdef WDCDEBUG 90 int wdcdebug_pciide_mask = 0; 91 #define WDCDEBUG_PRINT(args, level) \ 92 if (wdcdebug_pciide_mask & (level)) printf args 93 #else 94 #define WDCDEBUG_PRINT(args, level) 95 #endif 96 #include <sys/param.h> 97 #include <sys/systm.h> 98 #include <sys/device.h> 99 #include <sys/malloc.h> 100 101 #include <uvm/uvm_extern.h> 102 103 #include <machine/endian.h> 104 105 #include <dev/pci/pcireg.h> 106 #include <dev/pci/pcivar.h> 107 #include <dev/pci/pcidevs.h> 108 #include <dev/pci/pciidereg.h> 109 #include <dev/pci/pciidevar.h> 110 #include <dev/pci/pciide_piix_reg.h> 111 #include <dev/pci/pciide_amd_reg.h> 112 #include <dev/pci/pciide_apollo_reg.h> 113 #include <dev/pci/pciide_cmd_reg.h> 114 #include <dev/pci/pciide_cy693_reg.h> 115 #include <dev/pci/pciide_sis_reg.h> 116 #include <dev/pci/pciide_acer_reg.h> 117 #include <dev/pci/pciide_pdc202xx_reg.h> 118 #include <dev/pci/pciide_opti_reg.h> 119 #include <dev/pci/pciide_hpt_reg.h> 120 #include <dev/pci/pciide_acard_reg.h> 121 #include <dev/pci/pciide_sl82c105_reg.h> 122 #include <dev/pci/cy82c693var.h> 123 124 #include "opt_pciide.h" 125 126 /* inlines for reading/writing 8-bit PCI registers */ 127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t, 128 int)); 129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t, 130 int, u_int8_t)); 131 132 static __inline u_int8_t 133 pciide_pci_read(pc, pa, reg) 134 pci_chipset_tag_t pc; 135 pcitag_t pa; 136 int reg; 137 { 138 139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >> 140 ((reg & 0x03) * 8) & 0xff); 141 } 142 143 static __inline void 144 pciide_pci_write(pc, pa, reg, val) 145 pci_chipset_tag_t pc; 146 pcitag_t pa; 147 int reg; 148 u_int8_t val; 149 { 150 pcireg_t pcival; 151 152 pcival = pci_conf_read(pc, pa, (reg & ~0x03)); 153 pcival &= ~(0xff << ((reg & 0x03) * 8)); 154 pcival |= (val << ((reg & 0x03) * 8)); 155 pci_conf_write(pc, pa, (reg & ~0x03), pcival); 156 } 157 158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 159 160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 161 void piix_setup_channel __P((struct channel_softc*)); 162 void piix3_4_setup_channel __P((struct channel_softc*)); 163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t)); 164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*)); 165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t)); 166 167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 168 void amd7x6_setup_channel __P((struct channel_softc*)); 169 170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 171 void apollo_setup_channel __P((struct channel_softc*)); 172 173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct 
pci_attach_args*)); 175 void cmd0643_9_setup_channel __P((struct channel_softc*)); 176 void cmd_channel_map __P((struct pci_attach_args *, 177 struct pciide_softc *, int)); 178 int cmd_pci_intr __P((void *)); 179 void cmd646_9_irqack __P((struct channel_softc *)); 180 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 181 void cmd680_setup_channel __P((struct channel_softc*)); 182 void cmd680_channel_map __P((struct pci_attach_args *, 183 struct pciide_softc *, int)); 184 185 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 186 void cy693_setup_channel __P((struct channel_softc*)); 187 188 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 189 void sis_setup_channel __P((struct channel_softc*)); 190 static int sis_hostbr_match __P(( struct pci_attach_args *)); 191 192 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 193 void acer_setup_channel __P((struct channel_softc*)); 194 int acer_pci_intr __P((void *)); 195 196 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 197 void pdc202xx_setup_channel __P((struct channel_softc*)); 198 void pdc20268_setup_channel __P((struct channel_softc*)); 199 int pdc202xx_pci_intr __P((void *)); 200 int pdc20265_pci_intr __P((void *)); 201 202 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 203 void opti_setup_channel __P((struct channel_softc*)); 204 205 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 206 void hpt_setup_channel __P((struct channel_softc*)); 207 int hpt_pci_intr __P((void *)); 208 209 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 210 void acard_setup_channel __P((struct channel_softc*)); 211 int acard_pci_intr __P((void *)); 212 213 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 214 void serverworks_setup_channel __P((struct channel_softc*)); 215 int serverworks_pci_intr __P((void *)); 216 217 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 218 void sl82c105_setup_channel __P((struct channel_softc*)); 219 220 void pciide_channel_dma_setup __P((struct pciide_channel *)); 221 int pciide_dma_table_setup __P((struct pciide_softc*, int, int)); 222 int pciide_dma_init __P((void*, int, int, void *, size_t, int)); 223 void pciide_dma_start __P((void*, int, int)); 224 int pciide_dma_finish __P((void*, int, int, int)); 225 void pciide_irqack __P((struct channel_softc *)); 226 void pciide_print_modes __P((struct pciide_channel *)); 227 228 struct pciide_product_desc { 229 u_int32_t ide_product; 230 int ide_flags; 231 const char *ide_name; 232 /* map and setup chip, probe drives */ 233 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*)); 234 }; 235 236 /* Flags for ide_flags */ 237 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 238 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 239 240 /* Default product description for devices not known from this controller */ 241 const struct pciide_product_desc default_product_desc = { 242 0, 243 0, 244 "Generic PCI IDE controller", 245 default_chip_map, 246 }; 247 248 const struct pciide_product_desc pciide_intel_products[] = { 249 { PCI_PRODUCT_INTEL_82092AA, 250 0, 251 "Intel 82092AA IDE controller", 252 default_chip_map, 253 }, 254 { PCI_PRODUCT_INTEL_82371FB_IDE, 255 0, 256 "Intel 82371FB IDE controller (PIIX)", 257 piix_chip_map, 258 }, 259 { PCI_PRODUCT_INTEL_82371SB_IDE, 260 0, 261 
"Intel 82371SB IDE Interface (PIIX3)", 262 piix_chip_map, 263 }, 264 { PCI_PRODUCT_INTEL_82371AB_IDE, 265 0, 266 "Intel 82371AB IDE controller (PIIX4)", 267 piix_chip_map, 268 }, 269 { PCI_PRODUCT_INTEL_82440MX_IDE, 270 0, 271 "Intel 82440MX IDE controller", 272 piix_chip_map 273 }, 274 { PCI_PRODUCT_INTEL_82801AA_IDE, 275 0, 276 "Intel 82801AA IDE Controller (ICH)", 277 piix_chip_map, 278 }, 279 { PCI_PRODUCT_INTEL_82801AB_IDE, 280 0, 281 "Intel 82801AB IDE Controller (ICH0)", 282 piix_chip_map, 283 }, 284 { PCI_PRODUCT_INTEL_82801BA_IDE, 285 0, 286 "Intel 82801BA IDE Controller (ICH2)", 287 piix_chip_map, 288 }, 289 { PCI_PRODUCT_INTEL_82801BAM_IDE, 290 0, 291 "Intel 82801BAM IDE Controller (ICH2)", 292 piix_chip_map, 293 }, 294 { PCI_PRODUCT_INTEL_82801CA_IDE_1, 295 0, 296 "Intel 82801CA IDE Controller", 297 piix_chip_map, 298 }, 299 { PCI_PRODUCT_INTEL_82801CA_IDE_2, 300 0, 301 "Intel 82801CA IDE Controller", 302 piix_chip_map, 303 }, 304 { PCI_PRODUCT_INTEL_82801DB_IDE, 305 0, 306 "Intel 82801DB IDE Controller (ICH4)", 307 piix_chip_map, 308 }, 309 { 0, 310 0, 311 NULL, 312 NULL 313 } 314 }; 315 316 const struct pciide_product_desc pciide_amd_products[] = { 317 { PCI_PRODUCT_AMD_PBC756_IDE, 318 0, 319 "Advanced Micro Devices AMD756 IDE Controller", 320 amd7x6_chip_map 321 }, 322 { PCI_PRODUCT_AMD_PBC766_IDE, 323 0, 324 "Advanced Micro Devices AMD766 IDE Controller", 325 amd7x6_chip_map 326 }, 327 { PCI_PRODUCT_AMD_PBC768_IDE, 328 0, 329 "Advanced Micro Devices AMD768 IDE Controller", 330 amd7x6_chip_map 331 }, 332 { PCI_PRODUCT_AMD_PBC8111_IDE, 333 0, 334 "Advanced Micro Devices AMD8111 IDE Controller", 335 amd7x6_chip_map 336 }, 337 { 0, 338 0, 339 NULL, 340 NULL 341 } 342 }; 343 344 const struct pciide_product_desc pciide_cmd_products[] = { 345 { PCI_PRODUCT_CMDTECH_640, 346 0, 347 "CMD Technology PCI0640", 348 cmd_chip_map 349 }, 350 { PCI_PRODUCT_CMDTECH_643, 351 0, 352 "CMD Technology PCI0643", 353 cmd0643_9_chip_map, 354 }, 355 { PCI_PRODUCT_CMDTECH_646, 356 0, 357 "CMD Technology PCI0646", 358 cmd0643_9_chip_map, 359 }, 360 { PCI_PRODUCT_CMDTECH_648, 361 IDE_PCI_CLASS_OVERRIDE, 362 "CMD Technology PCI0648", 363 cmd0643_9_chip_map, 364 }, 365 { PCI_PRODUCT_CMDTECH_649, 366 IDE_PCI_CLASS_OVERRIDE, 367 "CMD Technology PCI0649", 368 cmd0643_9_chip_map, 369 }, 370 { PCI_PRODUCT_CMDTECH_680, 371 IDE_PCI_CLASS_OVERRIDE, 372 "Silicon Image 0680", 373 cmd680_chip_map, 374 }, 375 { 0, 376 0, 377 NULL, 378 NULL 379 } 380 }; 381 382 const struct pciide_product_desc pciide_via_products[] = { 383 { PCI_PRODUCT_VIATECH_VT82C586_IDE, 384 0, 385 NULL, 386 apollo_chip_map, 387 }, 388 { PCI_PRODUCT_VIATECH_VT82C586A_IDE, 389 0, 390 NULL, 391 apollo_chip_map, 392 }, 393 { 0, 394 0, 395 NULL, 396 NULL 397 } 398 }; 399 400 const struct pciide_product_desc pciide_cypress_products[] = { 401 { PCI_PRODUCT_CONTAQ_82C693, 402 IDE_16BIT_IOSPACE, 403 "Cypress 82C693 IDE Controller", 404 cy693_chip_map, 405 }, 406 { 0, 407 0, 408 NULL, 409 NULL 410 } 411 }; 412 413 const struct pciide_product_desc pciide_sis_products[] = { 414 { PCI_PRODUCT_SIS_5597_IDE, 415 0, 416 "Silicon Integrated System 5597/5598 IDE controller", 417 sis_chip_map, 418 }, 419 { 0, 420 0, 421 NULL, 422 NULL 423 } 424 }; 425 426 const struct pciide_product_desc pciide_acer_products[] = { 427 { PCI_PRODUCT_ALI_M5229, 428 0, 429 "Acer Labs M5229 UDMA IDE Controller", 430 acer_chip_map, 431 }, 432 { 0, 433 0, 434 NULL, 435 NULL 436 } 437 }; 438 439 const struct pciide_product_desc pciide_promise_products[] = { 440 { 
PCI_PRODUCT_PROMISE_ULTRA33, 441 IDE_PCI_CLASS_OVERRIDE, 442 "Promise Ultra33/ATA Bus Master IDE Accelerator", 443 pdc202xx_chip_map, 444 }, 445 { PCI_PRODUCT_PROMISE_ULTRA66, 446 IDE_PCI_CLASS_OVERRIDE, 447 "Promise Ultra66/ATA Bus Master IDE Accelerator", 448 pdc202xx_chip_map, 449 }, 450 { PCI_PRODUCT_PROMISE_ULTRA100, 451 IDE_PCI_CLASS_OVERRIDE, 452 "Promise Ultra100/ATA Bus Master IDE Accelerator", 453 pdc202xx_chip_map, 454 }, 455 { PCI_PRODUCT_PROMISE_ULTRA100X, 456 IDE_PCI_CLASS_OVERRIDE, 457 "Promise Ultra100/ATA Bus Master IDE Accelerator", 458 pdc202xx_chip_map, 459 }, 460 { PCI_PRODUCT_PROMISE_ULTRA100TX2, 461 IDE_PCI_CLASS_OVERRIDE, 462 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator", 463 pdc202xx_chip_map, 464 }, 465 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2, 466 IDE_PCI_CLASS_OVERRIDE, 467 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator", 468 pdc202xx_chip_map, 469 }, 470 { PCI_PRODUCT_PROMISE_ULTRA133, 471 IDE_PCI_CLASS_OVERRIDE, 472 "Promise Ultra133/ATA Bus Master IDE Accelerator", 473 pdc202xx_chip_map, 474 }, 475 { PCI_PRODUCT_PROMISE_ULTRA133TX2, 476 IDE_PCI_CLASS_OVERRIDE, 477 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator", 478 pdc202xx_chip_map, 479 }, 480 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2, 481 IDE_PCI_CLASS_OVERRIDE, 482 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator", 483 pdc202xx_chip_map, 484 }, 485 { 0, 486 0, 487 NULL, 488 NULL 489 } 490 }; 491 492 const struct pciide_product_desc pciide_opti_products[] = { 493 { PCI_PRODUCT_OPTI_82C621, 494 0, 495 "OPTi 82c621 PCI IDE controller", 496 opti_chip_map, 497 }, 498 { PCI_PRODUCT_OPTI_82C568, 499 0, 500 "OPTi 82c568 (82c621 compatible) PCI IDE controller", 501 opti_chip_map, 502 }, 503 { PCI_PRODUCT_OPTI_82D568, 504 0, 505 "OPTi 82d568 (82c621 compatible) PCI IDE controller", 506 opti_chip_map, 507 }, 508 { 0, 509 0, 510 NULL, 511 NULL 512 } 513 }; 514 515 const struct pciide_product_desc pciide_triones_products[] = { 516 { PCI_PRODUCT_TRIONES_HPT366, 517 IDE_PCI_CLASS_OVERRIDE, 518 NULL, 519 hpt_chip_map, 520 }, 521 { PCI_PRODUCT_TRIONES_HPT372, 522 IDE_PCI_CLASS_OVERRIDE, 523 NULL, 524 hpt_chip_map 525 }, 526 { PCI_PRODUCT_TRIONES_HPT374, 527 IDE_PCI_CLASS_OVERRIDE, 528 NULL, 529 hpt_chip_map 530 }, 531 { 0, 532 0, 533 NULL, 534 NULL 535 } 536 }; 537 538 const struct pciide_product_desc pciide_acard_products[] = { 539 { PCI_PRODUCT_ACARD_ATP850U, 540 IDE_PCI_CLASS_OVERRIDE, 541 "Acard ATP850U Ultra33 IDE Controller", 542 acard_chip_map, 543 }, 544 { PCI_PRODUCT_ACARD_ATP860, 545 IDE_PCI_CLASS_OVERRIDE, 546 "Acard ATP860 Ultra66 IDE Controller", 547 acard_chip_map, 548 }, 549 { PCI_PRODUCT_ACARD_ATP860A, 550 IDE_PCI_CLASS_OVERRIDE, 551 "Acard ATP860-A Ultra66 IDE Controller", 552 acard_chip_map, 553 }, 554 { 0, 555 0, 556 NULL, 557 NULL 558 } 559 }; 560 561 const struct pciide_product_desc pciide_serverworks_products[] = { 562 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE, 563 0, 564 "ServerWorks OSB4 IDE Controller", 565 serverworks_chip_map, 566 }, 567 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE, 568 0, 569 "ServerWorks CSB5 IDE Controller", 570 serverworks_chip_map, 571 }, 572 { 0, 573 0, 574 NULL, 575 } 576 }; 577 578 const struct pciide_product_desc pciide_symphony_products[] = { 579 { PCI_PRODUCT_SYMPHONY_82C105, 580 0, 581 "Symphony Labs 82C105 IDE controller", 582 sl82c105_chip_map, 583 }, 584 { 0, 585 0, 586 NULL, 587 } 588 }; 589 590 const struct pciide_product_desc pciide_winbond_products[] = { 591 { PCI_PRODUCT_WINBOND_W83C553F_1, 592 0, 593 "Winbond W83C553F IDE controller", 594 sl82c105_chip_map, 595 
	},
	{ 0,
	  0,
	  NULL,
	}
};

struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products },
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
	{ PCI_VENDOR_VIATECH, pciide_via_products },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
	{ PCI_VENDOR_SIS, pciide_sis_products },
	{ PCI_VENDOR_ALI, pciide_acer_products },
	{ PCI_VENDOR_PROMISE, pciide_promise_products },
	{ PCI_VENDOR_AMD, pciide_amd_products },
	{ PCI_VENDOR_OPTI, pciide_opti_products },
	{ PCI_VENDOR_TRIONES, pciide_triones_products },
	{ PCI_VENDOR_ACARD, pciide_acard_products },
	{ PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
	{ PCI_VENDOR_SYMPHONY, pciide_symphony_products },
	{ PCI_VENDOR_WINBOND, pciide_winbond_products },
	{ 0, NULL }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01
#define PCIIDE_OPTIONS_NODMA	0x02

int	pciide_match __P((struct device *, struct cfdata *, void *));
void	pciide_attach __P((struct device *, struct device *, void *));

CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
    pciide_match, pciide_attach, NULL, NULL);

int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
int	pciide_mapregs_compat __P((struct pci_attach_args *,
	    struct pciide_channel *, int, bus_size_t *, bus_size_t *));
int	pciide_mapregs_native __P((struct pci_attach_args *,
	    struct pciide_channel *, bus_size_t *, bus_size_t *,
	    int (*pci_intr) __P((void *))));
void	pciide_mapreg_dma __P((struct pciide_softc *,
	    struct pci_attach_args *));
int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
void	pciide_mapchan __P((struct pci_attach_args *,
	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
	    int (*pci_intr) __P((void *))));
int	pciide_chan_candisable __P((struct pciide_channel *));
void	pciide_map_compat_intr __P((struct pci_attach_args *,
	    struct pciide_channel *, int, int));
int	pciide_compat_intr __P((void *));
int	pciide_pci_intr __P((void *));
const struct pciide_product_desc *pciide_lookup_product __P((u_int32_t));

const struct pciide_product_desc *
pciide_lookup_product(id)
	u_int32_t id;
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;

	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if ((pp = vp->ide_products) == NULL)
		return NULL;

	for (; pp->chip_map != NULL; pp++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (pp->chip_map == NULL)
		return NULL;
	return pp;
}

int
pciide_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		return (1);
	}

	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers. Let's see if we can deal with them anyway.
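	 * Such controllers are matched on their PCI ID instead: their
	 * product table entry carries IDE_PCI_CLASS_OVERRIDE (the Promise
	 * and CMD PCI0648/0649 entries above, for instance), which tells
	 * us to accept the device even though its class code is not
	 * mass-storage/IDE.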
699 */ 700 pp = pciide_lookup_product(pa->pa_id); 701 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) { 702 return (1); 703 } 704 705 return (0); 706 } 707 708 void 709 pciide_attach(parent, self, aux) 710 struct device *parent, *self; 711 void *aux; 712 { 713 struct pci_attach_args *pa = aux; 714 pci_chipset_tag_t pc = pa->pa_pc; 715 pcitag_t tag = pa->pa_tag; 716 struct pciide_softc *sc = (struct pciide_softc *)self; 717 pcireg_t csr; 718 char devinfo[256]; 719 const char *displaydev; 720 721 sc->sc_pp = pciide_lookup_product(pa->pa_id); 722 if (sc->sc_pp == NULL) { 723 sc->sc_pp = &default_product_desc; 724 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo); 725 displaydev = devinfo; 726 } else 727 displaydev = sc->sc_pp->ide_name; 728 729 /* if displaydev == NULL, printf is done in chip-specific map */ 730 if (displaydev) 731 printf(": %s (rev. 0x%02x)\n", displaydev, 732 PCI_REVISION(pa->pa_class)); 733 734 sc->sc_pc = pa->pa_pc; 735 sc->sc_tag = pa->pa_tag; 736 #ifdef WDCDEBUG 737 if (wdcdebug_pciide_mask & DEBUG_PROBE) 738 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL); 739 #endif 740 sc->sc_pp->chip_map(sc, pa); 741 742 if (sc->sc_dma_ok) { 743 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 744 csr |= PCI_COMMAND_MASTER_ENABLE; 745 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 746 } 747 WDCDEBUG_PRINT(("pciide: command/status register=%x\n", 748 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE); 749 } 750 751 /* tell whether the chip is enabled or not */ 752 int 753 pciide_chipen(sc, pa) 754 struct pciide_softc *sc; 755 struct pci_attach_args *pa; 756 { 757 pcireg_t csr; 758 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) { 759 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 760 PCI_COMMAND_STATUS_REG); 761 printf("%s: device disabled (at %s)\n", 762 sc->sc_wdcdev.sc_dev.dv_xname, 763 (csr & PCI_COMMAND_IO_ENABLE) == 0 ? 
764 "device" : "bridge"); 765 return 0; 766 } 767 return 1; 768 } 769 770 int 771 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep) 772 struct pci_attach_args *pa; 773 struct pciide_channel *cp; 774 int compatchan; 775 bus_size_t *cmdsizep, *ctlsizep; 776 { 777 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 778 struct channel_softc *wdc_cp = &cp->wdc_channel; 779 780 cp->compat = 1; 781 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 782 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 783 784 wdc_cp->cmd_iot = pa->pa_iot; 785 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 786 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) { 787 printf("%s: couldn't map %s channel cmd regs\n", 788 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 789 return (0); 790 } 791 792 wdc_cp->ctl_iot = pa->pa_iot; 793 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 794 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) { 795 printf("%s: couldn't map %s channel ctl regs\n", 796 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 797 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, 798 PCIIDE_COMPAT_CMD_SIZE); 799 return (0); 800 } 801 802 return (1); 803 } 804 805 int 806 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr) 807 struct pci_attach_args * pa; 808 struct pciide_channel *cp; 809 bus_size_t *cmdsizep, *ctlsizep; 810 int (*pci_intr) __P((void *)); 811 { 812 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 813 struct channel_softc *wdc_cp = &cp->wdc_channel; 814 const char *intrstr; 815 pci_intr_handle_t intrhandle; 816 817 cp->compat = 0; 818 819 if (sc->sc_pci_ih == NULL) { 820 if (pci_intr_map(pa, &intrhandle) != 0) { 821 printf("%s: couldn't map native-PCI interrupt\n", 822 sc->sc_wdcdev.sc_dev.dv_xname); 823 return 0; 824 } 825 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 826 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 827 intrhandle, IPL_BIO, pci_intr, sc); 828 if (sc->sc_pci_ih != NULL) { 829 printf("%s: using %s for native-PCI interrupt\n", 830 sc->sc_wdcdev.sc_dev.dv_xname, 831 intrstr ? intrstr : "unknown interrupt"); 832 } else { 833 printf("%s: couldn't establish native-PCI interrupt", 834 sc->sc_wdcdev.sc_dev.dv_xname); 835 if (intrstr != NULL) 836 printf(" at %s", intrstr); 837 printf("\n"); 838 return 0; 839 } 840 } 841 cp->ih = sc->sc_pci_ih; 842 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel), 843 PCI_MAPREG_TYPE_IO, 0, 844 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) { 845 printf("%s: couldn't map %s channel cmd regs\n", 846 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 847 return 0; 848 } 849 850 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel), 851 PCI_MAPREG_TYPE_IO, 0, 852 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) { 853 printf("%s: couldn't map %s channel ctl regs\n", 854 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 855 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 856 return 0; 857 } 858 /* 859 * In native mode, 4 bytes of I/O space are mapped for the control 860 * register, the control register is at offset 2. Pass the generic 861 * code a handle for only one byte at the right offset. 
862 */ 863 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1, 864 &wdc_cp->ctl_ioh) != 0) { 865 printf("%s: unable to subregion %s channel ctl regs\n", 866 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 867 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 868 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep); 869 return 0; 870 } 871 return (1); 872 } 873 874 void 875 pciide_mapreg_dma(sc, pa) 876 struct pciide_softc *sc; 877 struct pci_attach_args *pa; 878 { 879 pcireg_t maptype; 880 bus_addr_t addr; 881 882 /* 883 * Map DMA registers 884 * 885 * Note that sc_dma_ok is the right variable to test to see if 886 * DMA can be done. If the interface doesn't support DMA, 887 * sc_dma_ok will never be non-zero. If the DMA regs couldn't 888 * be mapped, it'll be zero. I.e., sc_dma_ok will only be 889 * non-zero if the interface supports DMA and the registers 890 * could be mapped. 891 * 892 * XXX Note that despite the fact that the Bus Master IDE specs 893 * XXX say that "The bus master IDE function uses 16 bytes of IO 894 * XXX space," some controllers (at least the United 895 * XXX Microelectronics UM8886BF) place it in memory space. 896 */ 897 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 898 PCIIDE_REG_BUS_MASTER_DMA); 899 900 switch (maptype) { 901 case PCI_MAPREG_TYPE_IO: 902 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 903 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 904 &addr, NULL, NULL) == 0); 905 if (sc->sc_dma_ok == 0) { 906 printf(", but unused (couldn't query registers)"); 907 break; 908 } 909 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 910 && addr >= 0x10000) { 911 sc->sc_dma_ok = 0; 912 printf(", but unused (registers at unsafe address " 913 "%#lx)", (unsigned long)addr); 914 break; 915 } 916 /* FALLTHROUGH */ 917 918 case PCI_MAPREG_MEM_TYPE_32BIT: 919 sc->sc_dma_ok = (pci_mapreg_map(pa, 920 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 921 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0); 922 sc->sc_dmat = pa->pa_dmat; 923 if (sc->sc_dma_ok == 0) { 924 printf(", but unused (couldn't map registers)"); 925 } else { 926 sc->sc_wdcdev.dma_arg = sc; 927 sc->sc_wdcdev.dma_init = pciide_dma_init; 928 sc->sc_wdcdev.dma_start = pciide_dma_start; 929 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 930 } 931 932 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 933 PCIIDE_OPTIONS_NODMA) { 934 printf(", but unused (forced off by config file)"); 935 sc->sc_dma_ok = 0; 936 } 937 break; 938 939 default: 940 sc->sc_dma_ok = 0; 941 printf(", but unsupported register maptype (0x%x)", maptype); 942 } 943 } 944 945 int 946 pciide_compat_intr(arg) 947 void *arg; 948 { 949 struct pciide_channel *cp = arg; 950 951 #ifdef DIAGNOSTIC 952 /* should only be called for a compat channel */ 953 if (cp->compat == 0) 954 panic("pciide compat intr called for non-compat chan %p", cp); 955 #endif 956 return (wdcintr(&cp->wdc_channel)); 957 } 958 959 int 960 pciide_pci_intr(arg) 961 void *arg; 962 { 963 struct pciide_softc *sc = arg; 964 struct pciide_channel *cp; 965 struct channel_softc *wdc_cp; 966 int i, rv, crv; 967 968 rv = 0; 969 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 970 cp = &sc->pciide_channels[i]; 971 wdc_cp = &cp->wdc_channel; 972 973 /* If a compat channel skip. 
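	 * Compatibility channels get their own interrupt handler
	 * (pciide_compat_intr(), established through
	 * pciide_map_compat_intr()), so the shared native-PCI handler
	 * must not claim their interrupts here.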
*/ 974 if (cp->compat) 975 continue; 976 /* if this channel not waiting for intr, skip */ 977 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) 978 continue; 979 980 crv = wdcintr(wdc_cp); 981 if (crv == 0) 982 ; /* leave rv alone */ 983 else if (crv == 1) 984 rv = 1; /* claim the intr */ 985 else if (rv == 0) /* crv should be -1 in this case */ 986 rv = crv; /* if we've done no better, take it */ 987 } 988 return (rv); 989 } 990 991 void 992 pciide_channel_dma_setup(cp) 993 struct pciide_channel *cp; 994 { 995 int drive; 996 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 997 struct ata_drive_datas *drvp; 998 999 for (drive = 0; drive < 2; drive++) { 1000 drvp = &cp->wdc_channel.ch_drive[drive]; 1001 /* If no drive, skip */ 1002 if ((drvp->drive_flags & DRIVE) == 0) 1003 continue; 1004 /* setup DMA if needed */ 1005 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1006 (drvp->drive_flags & DRIVE_UDMA) == 0) || 1007 sc->sc_dma_ok == 0) { 1008 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 1009 continue; 1010 } 1011 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive) 1012 != 0) { 1013 /* Abort DMA setup */ 1014 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 1015 continue; 1016 } 1017 } 1018 } 1019 1020 int 1021 pciide_dma_table_setup(sc, channel, drive) 1022 struct pciide_softc *sc; 1023 int channel, drive; 1024 { 1025 bus_dma_segment_t seg; 1026 int error, rseg; 1027 const bus_size_t dma_table_size = 1028 sizeof(struct idedma_table) * NIDEDMA_TABLES; 1029 struct pciide_dma_maps *dma_maps = 1030 &sc->pciide_channels[channel].dma_maps[drive]; 1031 1032 /* If table was already allocated, just return */ 1033 if (dma_maps->dma_table) 1034 return 0; 1035 1036 /* Allocate memory for the DMA tables and map it */ 1037 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size, 1038 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg, 1039 BUS_DMA_NOWAIT)) != 0) { 1040 printf("%s:%d: unable to allocate table DMA for " 1041 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1042 channel, drive, error); 1043 return error; 1044 } 1045 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 1046 dma_table_size, 1047 (caddr_t *)&dma_maps->dma_table, 1048 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 1049 printf("%s:%d: unable to map table DMA for" 1050 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1051 channel, drive, error); 1052 return error; 1053 } 1054 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, " 1055 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size, 1056 (unsigned long)seg.ds_addr), DEBUG_PROBE); 1057 1058 /* Create and load table DMA map for this disk */ 1059 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size, 1060 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 1061 &dma_maps->dmamap_table)) != 0) { 1062 printf("%s:%d: unable to create table DMA map for " 1063 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1064 channel, drive, error); 1065 return error; 1066 } 1067 if ((error = bus_dmamap_load(sc->sc_dmat, 1068 dma_maps->dmamap_table, 1069 dma_maps->dma_table, 1070 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 1071 printf("%s:%d: unable to load table DMA map for " 1072 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1073 channel, drive, error); 1074 return error; 1075 } 1076 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 1077 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr), 1078 DEBUG_PROBE); 1079 /* Create a xfer DMA map for this drive */ 1080 if ((error = 
bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 1081 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN, 1082 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1083 &dma_maps->dmamap_xfer)) != 0) { 1084 printf("%s:%d: unable to create xfer DMA map for " 1085 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1086 channel, drive, error); 1087 return error; 1088 } 1089 return 0; 1090 } 1091 1092 int 1093 pciide_dma_init(v, channel, drive, databuf, datalen, flags) 1094 void *v; 1095 int channel, drive; 1096 void *databuf; 1097 size_t datalen; 1098 int flags; 1099 { 1100 struct pciide_softc *sc = v; 1101 int error, seg; 1102 struct pciide_dma_maps *dma_maps = 1103 &sc->pciide_channels[channel].dma_maps[drive]; 1104 1105 error = bus_dmamap_load(sc->sc_dmat, 1106 dma_maps->dmamap_xfer, 1107 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 1108 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE)); 1109 if (error) { 1110 printf("%s:%d: unable to load xfer DMA map for" 1111 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1112 channel, drive, error); 1113 return error; 1114 } 1115 1116 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1117 dma_maps->dmamap_xfer->dm_mapsize, 1118 (flags & WDC_DMA_READ) ? 1119 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1120 1121 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 1122 #ifdef DIAGNOSTIC 1123 /* A segment must not cross a 64k boundary */ 1124 { 1125 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 1126 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 1127 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 1128 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 1129 printf("pciide_dma: segment %d physical addr 0x%lx" 1130 " len 0x%lx not properly aligned\n", 1131 seg, phys, len); 1132 panic("pciide_dma: buf align"); 1133 } 1134 } 1135 #endif 1136 dma_maps->dma_table[seg].base_addr = 1137 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 1138 dma_maps->dma_table[seg].byte_count = 1139 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 1140 IDEDMA_BYTE_COUNT_MASK); 1141 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 1142 seg, le32toh(dma_maps->dma_table[seg].byte_count), 1143 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 1144 1145 } 1146 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 1147 htole32(IDEDMA_BYTE_COUNT_EOT); 1148 1149 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 1150 dma_maps->dmamap_table->dm_mapsize, 1151 BUS_DMASYNC_PREWRITE); 1152 1153 /* Maps are ready. Start DMA function */ 1154 #ifdef DIAGNOSTIC 1155 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 1156 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 1157 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr); 1158 panic("pciide_dma_init: table align"); 1159 } 1160 #endif 1161 1162 /* Clear status bits */ 1163 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1164 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, 1165 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1166 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel)); 1167 /* Write table addr */ 1168 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 1169 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel, 1170 dma_maps->dmamap_table->dm_segs[0].ds_addr); 1171 /* set read/write */ 1172 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1173 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1174 (flags & WDC_DMA_READ) ? 
IDEDMA_CMD_WRITE: 0); 1175 /* remember flags */ 1176 dma_maps->dma_flags = flags; 1177 return 0; 1178 } 1179 1180 void 1181 pciide_dma_start(v, channel, drive) 1182 void *v; 1183 int channel, drive; 1184 { 1185 struct pciide_softc *sc = v; 1186 1187 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS); 1188 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1189 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1190 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1191 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START); 1192 } 1193 1194 int 1195 pciide_dma_finish(v, channel, drive, force) 1196 void *v; 1197 int channel, drive; 1198 int force; 1199 { 1200 struct pciide_softc *sc = v; 1201 u_int8_t status; 1202 int error = 0; 1203 struct pciide_dma_maps *dma_maps = 1204 &sc->pciide_channels[channel].dma_maps[drive]; 1205 1206 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1207 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel); 1208 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 1209 DEBUG_XFERS); 1210 1211 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) 1212 return WDC_DMAST_NOIRQ; 1213 1214 /* stop DMA channel */ 1215 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1216 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1217 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1218 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START); 1219 1220 /* Unload the map of the data buffer */ 1221 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1222 dma_maps->dmamap_xfer->dm_mapsize, 1223 (dma_maps->dma_flags & WDC_DMA_READ) ? 1224 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1225 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 1226 1227 if ((status & IDEDMA_CTL_ERR) != 0) { 1228 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 1229 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 1230 error |= WDC_DMAST_ERR; 1231 } 1232 1233 if ((status & IDEDMA_CTL_INTR) == 0) { 1234 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 1235 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 1236 drive, status); 1237 error |= WDC_DMAST_NOIRQ; 1238 } 1239 1240 if ((status & IDEDMA_CTL_ACT) != 0) { 1241 /* data underrun, may be a valid condition for ATAPI */ 1242 error |= WDC_DMAST_UNDER; 1243 } 1244 return error; 1245 } 1246 1247 void 1248 pciide_irqack(chp) 1249 struct channel_softc *chp; 1250 { 1251 struct pciide_channel *cp = (struct pciide_channel*)chp; 1252 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1253 1254 /* clear status bits in IDE DMA registers */ 1255 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1256 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel, 1257 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1258 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel)); 1259 } 1260 1261 /* some common code used by several chip_map */ 1262 int 1263 pciide_chansetup(sc, channel, interface) 1264 struct pciide_softc *sc; 1265 int channel; 1266 pcireg_t interface; 1267 { 1268 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1269 sc->wdc_chanarray[channel] = &cp->wdc_channel; 1270 cp->name = PCIIDE_CHANNEL_NAME(channel); 1271 cp->wdc_channel.channel = channel; 1272 cp->wdc_channel.wdc = &sc->sc_wdcdev; 1273 cp->wdc_channel.ch_queue = 1274 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 1275 if (cp->wdc_channel.ch_queue == NULL) { 1276 printf("%s %s channel: " 1277 "can't allocate memory for command queue", 1278 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1279 return 0; 1280 } 1281 printf("%s: %s channel %s to %s 
mode\n", 1282 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1283 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 1284 "configured" : "wired", 1285 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 1286 "native-PCI" : "compatibility"); 1287 return 1; 1288 } 1289 1290 /* some common code used by several chip channel_map */ 1291 void 1292 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr) 1293 struct pci_attach_args *pa; 1294 struct pciide_channel *cp; 1295 pcireg_t interface; 1296 bus_size_t *cmdsizep, *ctlsizep; 1297 int (*pci_intr) __P((void *)); 1298 { 1299 struct channel_softc *wdc_cp = &cp->wdc_channel; 1300 1301 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 1302 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 1303 pci_intr); 1304 else 1305 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1306 wdc_cp->channel, cmdsizep, ctlsizep); 1307 1308 if (cp->hw_ok == 0) 1309 return; 1310 wdc_cp->data32iot = wdc_cp->cmd_iot; 1311 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 1312 wdcattach(wdc_cp); 1313 } 1314 1315 /* 1316 * Generic code to call to know if a channel can be disabled. Return 1 1317 * if channel can be disabled, 0 if not 1318 */ 1319 int 1320 pciide_chan_candisable(cp) 1321 struct pciide_channel *cp; 1322 { 1323 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1324 struct channel_softc *wdc_cp = &cp->wdc_channel; 1325 1326 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 1327 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 1328 printf("%s: disabling %s channel (no drives)\n", 1329 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1330 cp->hw_ok = 0; 1331 return 1; 1332 } 1333 return 0; 1334 } 1335 1336 /* 1337 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 1338 * Set hw_ok=0 on failure 1339 */ 1340 void 1341 pciide_map_compat_intr(pa, cp, compatchan, interface) 1342 struct pci_attach_args *pa; 1343 struct pciide_channel *cp; 1344 int compatchan, interface; 1345 { 1346 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1347 struct channel_softc *wdc_cp = &cp->wdc_channel; 1348 1349 if (cp->hw_ok == 0) 1350 return; 1351 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1352 return; 1353 1354 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 1355 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 1356 pa, compatchan, pciide_compat_intr, cp); 1357 if (cp->ih == NULL) { 1358 #endif 1359 printf("%s: no compatibility interrupt for use by %s " 1360 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1361 cp->hw_ok = 0; 1362 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 1363 } 1364 #endif 1365 } 1366 1367 void 1368 pciide_print_modes(cp) 1369 struct pciide_channel *cp; 1370 { 1371 wdc_print_modes(&cp->wdc_channel); 1372 } 1373 1374 void 1375 default_chip_map(sc, pa) 1376 struct pciide_softc *sc; 1377 struct pci_attach_args *pa; 1378 { 1379 struct pciide_channel *cp; 1380 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1381 pcireg_t csr; 1382 int channel, drive; 1383 struct ata_drive_datas *drvp; 1384 u_int8_t idedma_ctl; 1385 bus_size_t cmdsize, ctlsize; 1386 char *failreason; 1387 1388 if (pciide_chipen(sc, pa) == 0) 1389 return; 1390 1391 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 1392 printf("%s: bus-master DMA support present", 1393 sc->sc_wdcdev.sc_dev.dv_xname); 1394 if (sc->sc_pp == &default_product_desc && 1395 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 1396 PCIIDE_OPTIONS_DMA) == 0) { 1397 printf(", but unused (no driver support)"); 1398 sc->sc_dma_ok = 
0; 1399 } else { 1400 pciide_mapreg_dma(sc, pa); 1401 if (sc->sc_dma_ok != 0) 1402 printf(", used without full driver " 1403 "support"); 1404 } 1405 } else { 1406 printf("%s: hardware does not support DMA", 1407 sc->sc_wdcdev.sc_dev.dv_xname); 1408 sc->sc_dma_ok = 0; 1409 } 1410 printf("\n"); 1411 if (sc->sc_dma_ok) { 1412 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1413 sc->sc_wdcdev.irqack = pciide_irqack; 1414 } 1415 sc->sc_wdcdev.PIO_cap = 0; 1416 sc->sc_wdcdev.DMA_cap = 0; 1417 1418 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1419 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1420 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 1421 1422 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1423 cp = &sc->pciide_channels[channel]; 1424 if (pciide_chansetup(sc, channel, interface) == 0) 1425 continue; 1426 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 1427 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 1428 &ctlsize, pciide_pci_intr); 1429 } else { 1430 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1431 channel, &cmdsize, &ctlsize); 1432 } 1433 if (cp->hw_ok == 0) 1434 continue; 1435 /* 1436 * Check to see if something appears to be there. 1437 */ 1438 failreason = NULL; 1439 if (!wdcprobe(&cp->wdc_channel)) { 1440 failreason = "not responding; disabled or no drives?"; 1441 goto next; 1442 } 1443 /* 1444 * Now, make sure it's actually attributable to this PCI IDE 1445 * channel by trying to access the channel again while the 1446 * PCI IDE controller's I/O space is disabled. (If the 1447 * channel no longer appears to be there, it belongs to 1448 * this controller.) YUCK! 1449 */ 1450 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 1451 PCI_COMMAND_STATUS_REG); 1452 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1453 csr & ~PCI_COMMAND_IO_ENABLE); 1454 if (wdcprobe(&cp->wdc_channel)) 1455 failreason = "other hardware responding at addresses"; 1456 pci_conf_write(sc->sc_pc, sc->sc_tag, 1457 PCI_COMMAND_STATUS_REG, csr); 1458 next: 1459 if (failreason) { 1460 printf("%s: %s channel ignored (%s)\n", 1461 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1462 failreason); 1463 cp->hw_ok = 0; 1464 bus_space_unmap(cp->wdc_channel.cmd_iot, 1465 cp->wdc_channel.cmd_ioh, cmdsize); 1466 if (interface & PCIIDE_INTERFACE_PCI(channel)) 1467 bus_space_unmap(cp->wdc_channel.ctl_iot, 1468 cp->ctl_baseioh, ctlsize); 1469 else 1470 bus_space_unmap(cp->wdc_channel.ctl_iot, 1471 cp->wdc_channel.ctl_ioh, ctlsize); 1472 } else { 1473 pciide_map_compat_intr(pa, cp, channel, interface); 1474 } 1475 if (cp->hw_ok) { 1476 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 1477 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 1478 wdcattach(&cp->wdc_channel); 1479 } 1480 } 1481 1482 if (sc->sc_dma_ok == 0) 1483 return; 1484 1485 /* Allocate DMA maps */ 1486 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1487 idedma_ctl = 0; 1488 cp = &sc->pciide_channels[channel]; 1489 for (drive = 0; drive < 2; drive++) { 1490 drvp = &cp->wdc_channel.ch_drive[drive]; 1491 /* If no drive, skip */ 1492 if ((drvp->drive_flags & DRIVE) == 0) 1493 continue; 1494 if ((drvp->drive_flags & DRIVE_DMA) == 0) 1495 continue; 1496 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 1497 /* Abort DMA setup */ 1498 printf("%s:%d:%d: can't allocate DMA maps, " 1499 "using PIO transfers\n", 1500 sc->sc_wdcdev.sc_dev.dv_xname, 1501 channel, drive); 1502 drvp->drive_flags &= ~DRIVE_DMA; 1503 } 1504 printf("%s:%d:%d: using DMA data transfers\n", 1505 sc->sc_wdcdev.sc_dev.dv_xname, 1506 
channel, drive); 1507 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1508 } 1509 if (idedma_ctl != 0) { 1510 /* Add software bits in status register */ 1511 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1512 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel), 1513 idedma_ctl); 1514 } 1515 } 1516 } 1517 1518 void 1519 piix_chip_map(sc, pa) 1520 struct pciide_softc *sc; 1521 struct pci_attach_args *pa; 1522 { 1523 struct pciide_channel *cp; 1524 int channel; 1525 u_int32_t idetim; 1526 bus_size_t cmdsize, ctlsize; 1527 1528 if (pciide_chipen(sc, pa) == 0) 1529 return; 1530 1531 printf("%s: bus-master DMA support present", 1532 sc->sc_wdcdev.sc_dev.dv_xname); 1533 pciide_mapreg_dma(sc, pa); 1534 printf("\n"); 1535 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1536 WDC_CAPABILITY_MODE; 1537 if (sc->sc_dma_ok) { 1538 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1539 sc->sc_wdcdev.irqack = pciide_irqack; 1540 switch(sc->sc_pp->ide_product) { 1541 case PCI_PRODUCT_INTEL_82371AB_IDE: 1542 case PCI_PRODUCT_INTEL_82440MX_IDE: 1543 case PCI_PRODUCT_INTEL_82801AA_IDE: 1544 case PCI_PRODUCT_INTEL_82801AB_IDE: 1545 case PCI_PRODUCT_INTEL_82801BA_IDE: 1546 case PCI_PRODUCT_INTEL_82801BAM_IDE: 1547 case PCI_PRODUCT_INTEL_82801CA_IDE_1: 1548 case PCI_PRODUCT_INTEL_82801CA_IDE_2: 1549 case PCI_PRODUCT_INTEL_82801DB_IDE: 1550 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 1551 } 1552 } 1553 sc->sc_wdcdev.PIO_cap = 4; 1554 sc->sc_wdcdev.DMA_cap = 2; 1555 switch(sc->sc_pp->ide_product) { 1556 case PCI_PRODUCT_INTEL_82801AA_IDE: 1557 sc->sc_wdcdev.UDMA_cap = 4; 1558 break; 1559 case PCI_PRODUCT_INTEL_82801BA_IDE: 1560 case PCI_PRODUCT_INTEL_82801BAM_IDE: 1561 case PCI_PRODUCT_INTEL_82801CA_IDE_1: 1562 case PCI_PRODUCT_INTEL_82801CA_IDE_2: 1563 case PCI_PRODUCT_INTEL_82801DB_IDE: 1564 sc->sc_wdcdev.UDMA_cap = 5; 1565 break; 1566 default: 1567 sc->sc_wdcdev.UDMA_cap = 2; 1568 } 1569 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE) 1570 sc->sc_wdcdev.set_modes = piix_setup_channel; 1571 else 1572 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 1573 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1574 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1575 1576 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x", 1577 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1578 DEBUG_PROBE); 1579 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1580 WDCDEBUG_PRINT((", sidetim=0x%x", 1581 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1582 DEBUG_PROBE); 1583 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1584 WDCDEBUG_PRINT((", udamreg 0x%x", 1585 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1586 DEBUG_PROBE); 1587 } 1588 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1589 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1590 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1591 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1592 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 || 1593 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 || 1594 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) { 1595 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1596 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1597 DEBUG_PROBE); 1598 } 1599 1600 } 1601 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1602 1603 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1604 cp = &sc->pciide_channels[channel]; 1605 /* PIIX is compat-only */ 1606 if (pciide_chansetup(sc, channel, 0) == 0) 1607 continue; 1608 idetim 
= pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* PIIX are compat-only pciide devices */
		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
		}
		pciide_map_compat_intr(pa, cp, channel, 0);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}

	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}

void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess with the drive modes: the PIIX can't have
	 * different timings for master and slave drives.
	 * We need to find the best combination.
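	 * The rules used below: if both drives do multiword DMA, both get
	 * the lower of the two DMA modes; if only one does DMA it keeps its
	 * mode and the other drive falls back to PIO mode 0 whenever the
	 * shared ISP/RTC timings would not fit it; otherwise the slower
	 * drive's PIO mode is used for both, except that a drive in PIO
	 * mode 0 or 1 is simply left at mode 0 while the other keeps its
	 * own mode.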
	 */

	/* If both drives support DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		drvp[0].DMA_mode = mode[0];
		drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if its mode is not compatible.
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If neither drive uses DMA, take the lower PIO mode, unless
	 * one of them is in PIO mode < 2.
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are now set up */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we get here, neither drive is using DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller.
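	 * (idetim now holds the shared ISP/RTC cycle timings for this
	 * channel; piix_setup_idetim_drvs() below turns on the per-drive
	 * TIME/IE/PPE enable bits.)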
Enable 1747 * it per-drive 1748 */ 1749 for (drive = 0; drive < 2; drive++) { 1750 /* If no drive, skip */ 1751 if ((drvp[drive].drive_flags & DRIVE) == 0) 1752 continue; 1753 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 1754 if (drvp[drive].drive_flags & DRIVE_DMA) 1755 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1756 } 1757 if (idedma_ctl != 0) { 1758 /* Add software bits in status register */ 1759 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1760 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 1761 idedma_ctl); 1762 } 1763 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 1764 pciide_print_modes(cp); 1765 } 1766 1767 void 1768 piix3_4_setup_channel(chp) 1769 struct channel_softc *chp; 1770 { 1771 struct ata_drive_datas *drvp; 1772 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 1773 struct pciide_channel *cp = (struct pciide_channel*)chp; 1774 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1775 int drive; 1776 int channel = chp->channel; 1777 1778 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1779 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 1780 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 1781 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 1782 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 1783 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 1784 PIIX_SIDETIM_RTC_MASK(channel)); 1785 1786 idedma_ctl = 0; 1787 /* If channel disabled, no need to go further */ 1788 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 1789 return; 1790 /* set up new idetim: Enable IDE registers decode */ 1791 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 1792 1793 /* setup DMA if needed */ 1794 pciide_channel_dma_setup(cp); 1795 1796 for (drive = 0; drive < 2; drive++) { 1797 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 1798 PIIX_UDMATIM_SET(0x3, channel, drive)); 1799 drvp = &chp->ch_drive[drive]; 1800 /* If no drive, skip */ 1801 if ((drvp->drive_flags & DRIVE) == 0) 1802 continue; 1803 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1804 (drvp->drive_flags & DRIVE_UDMA) == 0)) 1805 goto pio; 1806 1807 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1808 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1809 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1810 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1811 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 || 1812 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 || 1813 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) { 1814 ideconf |= PIIX_CONFIG_PINGPONG; 1815 } 1816 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1817 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1818 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 || 1819 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 || 1820 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) { 1821 /* setup Ultra/100 */ 1822 if (drvp->UDMA_mode > 2 && 1823 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 1824 drvp->UDMA_mode = 2; 1825 if (drvp->UDMA_mode > 4) { 1826 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 1827 } else { 1828 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 1829 if (drvp->UDMA_mode > 2) { 1830 ideconf |= PIIX_CONFIG_UDMA66(channel, 1831 drive); 1832 } else { 1833 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 1834 drive); 1835 } 1836 } 1837 } 1838 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) { 1839 /* 
setup Ultra/66 */ 1840 if (drvp->UDMA_mode > 2 && 1841 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 1842 drvp->UDMA_mode = 2; 1843 if (drvp->UDMA_mode > 2) 1844 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 1845 else 1846 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 1847 } 1848 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1849 (drvp->drive_flags & DRIVE_UDMA)) { 1850 /* use Ultra/DMA */ 1851 drvp->drive_flags &= ~DRIVE_DMA; 1852 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive); 1853 udmareg |= PIIX_UDMATIM_SET( 1854 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 1855 } else { 1856 /* use Multiword DMA */ 1857 drvp->drive_flags &= ~DRIVE_UDMA; 1858 if (drive == 0) { 1859 idetim |= piix_setup_idetim_timings( 1860 drvp->DMA_mode, 1, channel); 1861 } else { 1862 sidetim |= piix_setup_sidetim_timings( 1863 drvp->DMA_mode, 1, channel); 1864 idetim = PIIX_IDETIM_SET(idetim, 1865 PIIX_IDETIM_SITRE, channel); 1866 } 1867 } 1868 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1869 1870 pio: /* use PIO mode */ 1871 idetim |= piix_setup_idetim_drvs(drvp); 1872 if (drive == 0) { 1873 idetim |= piix_setup_idetim_timings( 1874 drvp->PIO_mode, 0, channel); 1875 } else { 1876 sidetim |= piix_setup_sidetim_timings( 1877 drvp->PIO_mode, 0, channel); 1878 idetim = PIIX_IDETIM_SET(idetim, 1879 PIIX_IDETIM_SITRE, channel); 1880 } 1881 } 1882 if (idedma_ctl != 0) { 1883 /* Add software bits in status register */ 1884 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1885 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel), 1886 idedma_ctl); 1887 } 1888 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 1889 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 1890 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 1891 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 1892 pciide_print_modes(cp); 1893 } 1894 1895 1896 /* setup ISP and RTC fields, based on mode */ 1897 static u_int32_t 1898 piix_setup_idetim_timings(mode, dma, channel) 1899 u_int8_t mode; 1900 u_int8_t dma; 1901 u_int8_t channel; 1902 { 1903 1904 if (dma) 1905 return PIIX_IDETIM_SET(0, 1906 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 1907 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 1908 channel); 1909 else 1910 return PIIX_IDETIM_SET(0, 1911 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 1912 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 1913 channel); 1914 } 1915 1916 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 1917 static u_int32_t 1918 piix_setup_idetim_drvs(drvp) 1919 struct ata_drive_datas *drvp; 1920 { 1921 u_int32_t ret = 0; 1922 struct channel_softc *chp = drvp->chnl_softc; 1923 u_int8_t channel = chp->channel; 1924 u_int8_t drive = drvp->drive; 1925 1926 /* 1927 * If drive is using UDMA, timing setups are independent, 1928 * so just check DMA and PIO here. 1929 */ 1930 if (drvp->drive_flags & DRIVE_DMA) { 1931 /* if mode = DMA mode 0, use compatible timings */ 1932 if ((drvp->drive_flags & DRIVE_DMA) && 1933 drvp->DMA_mode == 0) { 1934 drvp->PIO_mode = 0; 1935 return ret; 1936 } 1937 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 1938 /* 1939 * If PIO and DMA timings are the same, use fast timings for PIO 1940 * too, else use compat timings.
1941 */ 1942 if ((piix_isp_pio[drvp->PIO_mode] != 1943 piix_isp_dma[drvp->DMA_mode]) || 1944 (piix_rtc_pio[drvp->PIO_mode] != 1945 piix_rtc_dma[drvp->DMA_mode])) 1946 drvp->PIO_mode = 0; 1947 /* if PIO mode <= 2, use compat timings for PIO */ 1948 if (drvp->PIO_mode <= 2) { 1949 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 1950 channel); 1951 return ret; 1952 } 1953 } 1954 1955 /* 1956 * Now setup PIO modes. If mode < 2, use compat timings. 1957 * Else enable fast timings. Enable IORDY and prefetch/post 1958 * if PIO mode >= 3. 1959 */ 1960 1961 if (drvp->PIO_mode < 2) 1962 return ret; 1963 1964 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 1965 if (drvp->PIO_mode >= 3) { 1966 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 1967 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 1968 } 1969 return ret; 1970 } 1971 1972 /* setup values in SIDETIM registers, based on mode */ 1973 static u_int32_t 1974 piix_setup_sidetim_timings(mode, dma, channel) 1975 u_int8_t mode; 1976 u_int8_t dma; 1977 u_int8_t channel; 1978 { 1979 if (dma) 1980 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 1981 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel); 1982 else 1983 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 1984 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel); 1985 } 1986 1987 void 1988 amd7x6_chip_map(sc, pa) 1989 struct pciide_softc *sc; 1990 struct pci_attach_args *pa; 1991 { 1992 struct pciide_channel *cp; 1993 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1994 int channel; 1995 pcireg_t chanenable; 1996 bus_size_t cmdsize, ctlsize; 1997 1998 if (pciide_chipen(sc, pa) == 0) 1999 return; 2000 printf("%s: bus-master DMA support present", 2001 sc->sc_wdcdev.sc_dev.dv_xname); 2002 pciide_mapreg_dma(sc, pa); 2003 printf("\n"); 2004 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2005 WDC_CAPABILITY_MODE; 2006 if (sc->sc_dma_ok) { 2007 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2008 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2009 sc->sc_wdcdev.irqack = pciide_irqack; 2010 } 2011 sc->sc_wdcdev.PIO_cap = 4; 2012 sc->sc_wdcdev.DMA_cap = 2; 2013 2014 switch (sc->sc_pp->ide_product) { 2015 case PCI_PRODUCT_AMD_PBC766_IDE: 2016 case PCI_PRODUCT_AMD_PBC768_IDE: 2017 case PCI_PRODUCT_AMD_PBC8111_IDE: 2018 sc->sc_wdcdev.UDMA_cap = 5; 2019 break; 2020 default: 2021 sc->sc_wdcdev.UDMA_cap = 4; 2022 } 2023 sc->sc_wdcdev.set_modes = amd7x6_setup_channel; 2024 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2025 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2026 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN); 2027 2028 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable), 2029 DEBUG_PROBE); 2030 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2031 cp = &sc->pciide_channels[channel]; 2032 if (pciide_chansetup(sc, channel, interface) == 0) 2033 continue; 2034 2035 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) { 2036 printf("%s: %s channel ignored (disabled)\n", 2037 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2038 continue; 2039 } 2040 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2041 pciide_pci_intr); 2042 2043 if (pciide_chan_candisable(cp)) 2044 chanenable &= ~AMD7X6_CHAN_EN(channel); 2045 pciide_map_compat_intr(pa, cp, channel, interface); 2046 if (cp->hw_ok == 0) 2047 continue; 2048 2049 amd7x6_setup_channel(&cp->wdc_channel); 2050 } 2051 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN, 2052 chanenable); 2053 return; 2054 } 
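/*
 * The amd7x6 and apollo setup_channel routines below share a pattern for
 * multiword DMA: a single data-timing register covers both PIO and DMA
 * cycles, so the timing index used is effectively
 * min(PIO_mode, DMA_mode + 2). A minimal sketch of that selection, using
 * the same ata_drive_datas fields the code below reads:
 *
 *	if (drvp->PIO_mode <= drvp->DMA_mode + 2)
 *		mode = drvp->PIO_mode;
 *	else
 *		mode = drvp->DMA_mode + 2;
 *
 * If the resulting mode is 2 or less, both PIO_mode and DMA_mode are then
 * forced back to compatible (mode 0) timings.
 */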
2055 2056 void 2057 amd7x6_setup_channel(chp) 2058 struct channel_softc *chp; 2059 { 2060 u_int32_t udmatim_reg, datatim_reg; 2061 u_int8_t idedma_ctl; 2062 int mode, drive; 2063 struct ata_drive_datas *drvp; 2064 struct pciide_channel *cp = (struct pciide_channel*)chp; 2065 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2066 #ifndef PCIIDE_AMD756_ENABLEDMA 2067 int rev = PCI_REVISION( 2068 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 2069 #endif 2070 2071 idedma_ctl = 0; 2072 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM); 2073 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA); 2074 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel); 2075 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel); 2076 2077 /* setup DMA if needed */ 2078 pciide_channel_dma_setup(cp); 2079 2080 for (drive = 0; drive < 2; drive++) { 2081 drvp = &chp->ch_drive[drive]; 2082 /* If no drive, skip */ 2083 if ((drvp->drive_flags & DRIVE) == 0) 2084 continue; 2085 /* add timing values, setup DMA if needed */ 2086 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2087 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2088 mode = drvp->PIO_mode; 2089 goto pio; 2090 } 2091 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2092 (drvp->drive_flags & DRIVE_UDMA)) { 2093 /* use Ultra/DMA */ 2094 drvp->drive_flags &= ~DRIVE_DMA; 2095 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) | 2096 AMD7X6_UDMA_EN_MTH(chp->channel, drive) | 2097 AMD7X6_UDMA_TIME(chp->channel, drive, 2098 amd7x6_udma_tim[drvp->UDMA_mode]); 2099 /* can use PIO timings, MW DMA unused */ 2100 mode = drvp->PIO_mode; 2101 } else { 2102 /* use Multiword DMA, but only if revision is OK */ 2103 drvp->drive_flags &= ~DRIVE_UDMA; 2104 #ifndef PCIIDE_AMD756_ENABLEDMA 2105 /* 2106 * The workaround doesn't seem to be necessary 2107 * with all drives, so it can be disabled by 2108 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 2109 * triggered. 
2110 */ 2111 if (sc->sc_pp->ide_product == 2112 PCI_PRODUCT_AMD_PBC756_IDE && 2113 AMD756_CHIPREV_DISABLEDMA(rev)) { 2114 printf("%s:%d:%d: multi-word DMA disabled due " 2115 "to chip revision\n", 2116 sc->sc_wdcdev.sc_dev.dv_xname, 2117 chp->channel, drive); 2118 mode = drvp->PIO_mode; 2119 drvp->drive_flags &= ~DRIVE_DMA; 2120 goto pio; 2121 } 2122 #endif 2123 /* mode = min(pio, dma+2) */ 2124 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2125 mode = drvp->PIO_mode; 2126 else 2127 mode = drvp->DMA_mode + 2; 2128 } 2129 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2130 2131 pio: /* setup PIO mode */ 2132 if (mode <= 2) { 2133 drvp->DMA_mode = 0; 2134 drvp->PIO_mode = 0; 2135 mode = 0; 2136 } else { 2137 drvp->PIO_mode = mode; 2138 drvp->DMA_mode = mode - 2; 2139 } 2140 datatim_reg |= 2141 AMD7X6_DATATIM_PULSE(chp->channel, drive, 2142 amd7x6_pio_set[mode]) | 2143 AMD7X6_DATATIM_RECOV(chp->channel, drive, 2144 amd7x6_pio_rec[mode]); 2145 } 2146 if (idedma_ctl != 0) { 2147 /* Add software bits in status register */ 2148 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2149 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2150 idedma_ctl); 2151 } 2152 pciide_print_modes(cp); 2153 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg); 2154 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg); 2155 } 2156 2157 void 2158 apollo_chip_map(sc, pa) 2159 struct pciide_softc *sc; 2160 struct pci_attach_args *pa; 2161 { 2162 struct pciide_channel *cp; 2163 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2164 int channel; 2165 u_int32_t ideconf; 2166 bus_size_t cmdsize, ctlsize; 2167 pcitag_t pcib_tag; 2168 pcireg_t pcib_id, pcib_class; 2169 2170 if (pciide_chipen(sc, pa) == 0) 2171 return; 2172 /* get a PCI tag for the ISA bridge (function 0 of the same device) */ 2173 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 2174 /* and read ID and rev of the ISA bridge */ 2175 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG); 2176 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG); 2177 printf(": VIA Technologies "); 2178 switch (PCI_PRODUCT(pcib_id)) { 2179 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 2180 printf("VT82C586 (Apollo VP) "); 2181 if(PCI_REVISION(pcib_class) >= 0x02) { 2182 printf("ATA33 controller\n"); 2183 sc->sc_wdcdev.UDMA_cap = 2; 2184 } else { 2185 printf("controller\n"); 2186 sc->sc_wdcdev.UDMA_cap = 0; 2187 } 2188 break; 2189 case PCI_PRODUCT_VIATECH_VT82C596A: 2190 printf("VT82C596A (Apollo Pro) "); 2191 if (PCI_REVISION(pcib_class) >= 0x12) { 2192 printf("ATA66 controller\n"); 2193 sc->sc_wdcdev.UDMA_cap = 4; 2194 } else { 2195 printf("ATA33 controller\n"); 2196 sc->sc_wdcdev.UDMA_cap = 2; 2197 } 2198 break; 2199 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 2200 printf("VT82C686A (Apollo KX133) "); 2201 if (PCI_REVISION(pcib_class) >= 0x40) { 2202 printf("ATA100 controller\n"); 2203 sc->sc_wdcdev.UDMA_cap = 5; 2204 } else { 2205 printf("ATA66 controller\n"); 2206 sc->sc_wdcdev.UDMA_cap = 4; 2207 } 2208 break; 2209 case PCI_PRODUCT_VIATECH_VT8231: 2210 printf("VT8231 ATA100 controller\n"); 2211 sc->sc_wdcdev.UDMA_cap = 5; 2212 break; 2213 case PCI_PRODUCT_VIATECH_VT8233: 2214 printf("VT8233 ATA100 controller\n"); 2215 sc->sc_wdcdev.UDMA_cap = 5; 2216 break; 2217 case PCI_PRODUCT_VIATECH_VT8233A: 2218 printf("VT8233A ATA133 controller\n"); 2219 sc->sc_wdcdev.UDMA_cap = 6; 2220 break; 2221 case PCI_PRODUCT_VIATECH_VT8235: 2222 printf("VT8235 ATA133 controller\n"); 2223 sc->sc_wdcdev.UDMA_cap = 6; 2224 break; 2225 default: 2226 printf("unknown 
ATA controller\n"); 2227 sc->sc_wdcdev.UDMA_cap = 0; 2228 } 2229 2230 printf("%s: bus-master DMA support present", 2231 sc->sc_wdcdev.sc_dev.dv_xname); 2232 pciide_mapreg_dma(sc, pa); 2233 printf("\n"); 2234 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2235 WDC_CAPABILITY_MODE; 2236 if (sc->sc_dma_ok) { 2237 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2238 sc->sc_wdcdev.irqack = pciide_irqack; 2239 if (sc->sc_wdcdev.UDMA_cap > 0) 2240 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2241 } 2242 sc->sc_wdcdev.PIO_cap = 4; 2243 sc->sc_wdcdev.DMA_cap = 2; 2244 sc->sc_wdcdev.set_modes = apollo_setup_channel; 2245 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2246 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2247 2248 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 2249 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2250 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 2251 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 2252 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2253 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 2254 DEBUG_PROBE); 2255 2256 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2257 cp = &sc->pciide_channels[channel]; 2258 if (pciide_chansetup(sc, channel, interface) == 0) 2259 continue; 2260 2261 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 2262 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 2263 printf("%s: %s channel ignored (disabled)\n", 2264 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2265 continue; 2266 } 2267 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2268 pciide_pci_intr); 2269 if (cp->hw_ok == 0) 2270 continue; 2271 if (pciide_chan_candisable(cp)) { 2272 ideconf &= ~APO_IDECONF_EN(channel); 2273 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 2274 ideconf); 2275 } 2276 pciide_map_compat_intr(pa, cp, channel, interface); 2277 2278 if (cp->hw_ok == 0) 2279 continue; 2280 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 2281 } 2282 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2283 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2284 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 2285 } 2286 2287 void 2288 apollo_setup_channel(chp) 2289 struct channel_softc *chp; 2290 { 2291 u_int32_t udmatim_reg, datatim_reg; 2292 u_int8_t idedma_ctl; 2293 int mode, drive; 2294 struct ata_drive_datas *drvp; 2295 struct pciide_channel *cp = (struct pciide_channel*)chp; 2296 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2297 2298 idedma_ctl = 0; 2299 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 2300 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 2301 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 2302 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 2303 2304 /* setup DMA if needed */ 2305 pciide_channel_dma_setup(cp); 2306 2307 for (drive = 0; drive < 2; drive++) { 2308 drvp = &chp->ch_drive[drive]; 2309 /* If no drive, skip */ 2310 if ((drvp->drive_flags & DRIVE) == 0) 2311 continue; 2312 /* add timing values, setup DMA if needed */ 2313 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2314 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2315 mode = drvp->PIO_mode; 2316 goto pio; 2317 } 2318 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2319 (drvp->drive_flags & DRIVE_UDMA)) { 2320 /* use Ultra/DMA */ 2321 drvp->drive_flags &= ~DRIVE_DMA; 2322 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 2323 APO_UDMA_EN_MTH(chp->channel, drive); 2324 if (sc->sc_wdcdev.UDMA_cap == 
6) { 2325 /* 8233a */ 2326 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2327 drive, apollo_udma133_tim[drvp->UDMA_mode]); 2328 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 2329 /* 686b */ 2330 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2331 drive, apollo_udma100_tim[drvp->UDMA_mode]); 2332 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 2333 /* 596b or 686a */ 2334 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 2335 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2336 drive, apollo_udma66_tim[drvp->UDMA_mode]); 2337 } else { 2338 /* 596a or 586b */ 2339 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2340 drive, apollo_udma33_tim[drvp->UDMA_mode]); 2341 } 2342 /* can use PIO timings, MW DMA unused */ 2343 mode = drvp->PIO_mode; 2344 } else { 2345 /* use Multiword DMA */ 2346 drvp->drive_flags &= ~DRIVE_UDMA; 2347 /* mode = min(pio, dma+2) */ 2348 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2349 mode = drvp->PIO_mode; 2350 else 2351 mode = drvp->DMA_mode + 2; 2352 } 2353 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2354 2355 pio: /* setup PIO mode */ 2356 if (mode <= 2) { 2357 drvp->DMA_mode = 0; 2358 drvp->PIO_mode = 0; 2359 mode = 0; 2360 } else { 2361 drvp->PIO_mode = mode; 2362 drvp->DMA_mode = mode - 2; 2363 } 2364 datatim_reg |= 2365 APO_DATATIM_PULSE(chp->channel, drive, 2366 apollo_pio_set[mode]) | 2367 APO_DATATIM_RECOV(chp->channel, drive, 2368 apollo_pio_rec[mode]); 2369 } 2370 if (idedma_ctl != 0) { 2371 /* Add software bits in status register */ 2372 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2373 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2374 idedma_ctl); 2375 } 2376 pciide_print_modes(cp); 2377 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 2378 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 2379 } 2380 2381 void 2382 cmd_channel_map(pa, sc, channel) 2383 struct pci_attach_args *pa; 2384 struct pciide_softc *sc; 2385 int channel; 2386 { 2387 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2388 bus_size_t cmdsize, ctlsize; 2389 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 2390 int interface, one_channel; 2391 2392 /* 2393 * The 0648/0649 can be told to identify as a RAID controller. 
2394 * In this case, we have to fake interface 2395 */ 2396 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2397 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2398 PCIIDE_INTERFACE_SETTABLE(1); 2399 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2400 CMD_CONF_DSA1) 2401 interface |= PCIIDE_INTERFACE_PCI(0) | 2402 PCIIDE_INTERFACE_PCI(1); 2403 } else { 2404 interface = PCI_INTERFACE(pa->pa_class); 2405 } 2406 2407 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2408 cp->name = PCIIDE_CHANNEL_NAME(channel); 2409 cp->wdc_channel.channel = channel; 2410 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2411 2412 /* 2413 * Older CMD64X doesn't have independant channels 2414 */ 2415 switch (sc->sc_pp->ide_product) { 2416 case PCI_PRODUCT_CMDTECH_649: 2417 one_channel = 0; 2418 break; 2419 default: 2420 one_channel = 1; 2421 break; 2422 } 2423 2424 if (channel > 0 && one_channel) { 2425 cp->wdc_channel.ch_queue = 2426 sc->pciide_channels[0].wdc_channel.ch_queue; 2427 } else { 2428 cp->wdc_channel.ch_queue = 2429 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2430 } 2431 if (cp->wdc_channel.ch_queue == NULL) { 2432 printf("%s %s channel: " 2433 "can't allocate memory for command queue", 2434 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2435 return; 2436 } 2437 2438 printf("%s: %s channel %s to %s mode\n", 2439 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2440 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 2441 "configured" : "wired", 2442 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 2443 "native-PCI" : "compatibility"); 2444 2445 /* 2446 * with a CMD PCI64x, if we get here, the first channel is enabled: 2447 * there's no way to disable the first channel without disabling 2448 * the whole device 2449 */ 2450 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 2451 printf("%s: %s channel ignored (disabled)\n", 2452 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2453 return; 2454 } 2455 2456 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 2457 if (cp->hw_ok == 0) 2458 return; 2459 if (channel == 1) { 2460 if (pciide_chan_candisable(cp)) { 2461 ctrl &= ~CMD_CTRL_2PORT; 2462 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2463 CMD_CTRL, ctrl); 2464 } 2465 } 2466 pciide_map_compat_intr(pa, cp, channel, interface); 2467 } 2468 2469 int 2470 cmd_pci_intr(arg) 2471 void *arg; 2472 { 2473 struct pciide_softc *sc = arg; 2474 struct pciide_channel *cp; 2475 struct channel_softc *wdc_cp; 2476 int i, rv, crv; 2477 u_int32_t priirq, secirq; 2478 2479 rv = 0; 2480 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2481 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2482 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2483 cp = &sc->pciide_channels[i]; 2484 wdc_cp = &cp->wdc_channel; 2485 /* If a compat channel skip. */ 2486 if (cp->compat) 2487 continue; 2488 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 2489 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 2490 crv = wdcintr(wdc_cp); 2491 if (crv == 0) 2492 printf("%s:%d: bogus intr\n", 2493 sc->sc_wdcdev.sc_dev.dv_xname, i); 2494 else 2495 rv = 1; 2496 } 2497 } 2498 return rv; 2499 } 2500 2501 void 2502 cmd_chip_map(sc, pa) 2503 struct pciide_softc *sc; 2504 struct pci_attach_args *pa; 2505 { 2506 int channel; 2507 2508 /* 2509 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2510 * and base adresses registers can be disabled at 2511 * hardware level. In this case, the device is wired 2512 * in compat mode and its first channel is always enabled, 2513 * but we can't rely on PCI_COMMAND_IO_ENABLE. 
2514 * In fact, it seems that the first channel of the CMD PCI0640 2515 * can't be disabled. 2516 */ 2517 2518 #ifdef PCIIDE_CMD064x_DISABLE 2519 if (pciide_chipen(sc, pa) == 0) 2520 return; 2521 #endif 2522 2523 printf("%s: hardware does not support DMA\n", 2524 sc->sc_wdcdev.sc_dev.dv_xname); 2525 sc->sc_dma_ok = 0; 2526 2527 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2528 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2529 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 2530 2531 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2532 cmd_channel_map(pa, sc, channel); 2533 } 2534 } 2535 2536 void 2537 cmd0643_9_chip_map(sc, pa) 2538 struct pciide_softc *sc; 2539 struct pci_attach_args *pa; 2540 { 2541 struct pciide_channel *cp; 2542 int channel; 2543 pcireg_t rev = PCI_REVISION(pa->pa_class); 2544 2545 /* 2546 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2547 * and base adresses registers can be disabled at 2548 * hardware level. In this case, the device is wired 2549 * in compat mode and its first channel is always enabled, 2550 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2551 * In fact, it seems that the first channel of the CMD PCI0640 2552 * can't be disabled. 2553 */ 2554 2555 #ifdef PCIIDE_CMD064x_DISABLE 2556 if (pciide_chipen(sc, pa) == 0) 2557 return; 2558 #endif 2559 printf("%s: bus-master DMA support present", 2560 sc->sc_wdcdev.sc_dev.dv_xname); 2561 pciide_mapreg_dma(sc, pa); 2562 printf("\n"); 2563 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2564 WDC_CAPABILITY_MODE; 2565 if (sc->sc_dma_ok) { 2566 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2567 switch (sc->sc_pp->ide_product) { 2568 case PCI_PRODUCT_CMDTECH_649: 2569 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2570 sc->sc_wdcdev.UDMA_cap = 5; 2571 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2572 break; 2573 case PCI_PRODUCT_CMDTECH_648: 2574 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2575 sc->sc_wdcdev.UDMA_cap = 4; 2576 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2577 break; 2578 case PCI_PRODUCT_CMDTECH_646: 2579 if (rev >= CMD0646U2_REV) { 2580 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2581 sc->sc_wdcdev.UDMA_cap = 2; 2582 } else if (rev >= CMD0646U_REV) { 2583 /* 2584 * Linux's driver claims that the 646U is broken 2585 * with UDMA. 
Only enable it if we know what we're 2586 * doing 2587 */ 2588 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 2589 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2590 sc->sc_wdcdev.UDMA_cap = 2; 2591 #endif 2592 /* explicitly disable UDMA */ 2593 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2594 CMD_UDMATIM(0), 0); 2595 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2596 CMD_UDMATIM(1), 0); 2597 } 2598 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2599 break; 2600 default: 2601 sc->sc_wdcdev.irqack = pciide_irqack; 2602 } 2603 } 2604 2605 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2606 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2607 sc->sc_wdcdev.PIO_cap = 4; 2608 sc->sc_wdcdev.DMA_cap = 2; 2609 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 2610 2611 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 2612 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2613 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2614 DEBUG_PROBE); 2615 2616 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2617 cp = &sc->pciide_channels[channel]; 2618 cmd_channel_map(pa, sc, channel); 2619 if (cp->hw_ok == 0) 2620 continue; 2621 cmd0643_9_setup_channel(&cp->wdc_channel); 2622 } 2623 /* 2624 * note - this also makes sure we clear the irq disable and reset 2625 * bits 2626 */ 2627 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 2628 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 2629 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2630 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2631 DEBUG_PROBE); 2632 } 2633 2634 void 2635 cmd0643_9_setup_channel(chp) 2636 struct channel_softc *chp; 2637 { 2638 struct ata_drive_datas *drvp; 2639 u_int8_t tim; 2640 u_int32_t idedma_ctl, udma_reg; 2641 int drive; 2642 struct pciide_channel *cp = (struct pciide_channel*)chp; 2643 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2644 2645 idedma_ctl = 0; 2646 /* setup DMA if needed */ 2647 pciide_channel_dma_setup(cp); 2648 2649 for (drive = 0; drive < 2; drive++) { 2650 drvp = &chp->ch_drive[drive]; 2651 /* If no drive, skip */ 2652 if ((drvp->drive_flags & DRIVE) == 0) 2653 continue; 2654 /* add timing values, setup DMA if needed */ 2655 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 2656 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 2657 if (drvp->drive_flags & DRIVE_UDMA) { 2658 /* UltraDMA on a 646U2, 0648 or 0649 */ 2659 drvp->drive_flags &= ~DRIVE_DMA; 2660 udma_reg = pciide_pci_read(sc->sc_pc, 2661 sc->sc_tag, CMD_UDMATIM(chp->channel)); 2662 if (drvp->UDMA_mode > 2 && 2663 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 2664 CMD_BICSR) & 2665 CMD_BICSR_80(chp->channel)) == 0) 2666 drvp->UDMA_mode = 2; 2667 if (drvp->UDMA_mode > 2) 2668 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 2669 else if (sc->sc_wdcdev.UDMA_cap > 2) 2670 udma_reg |= CMD_UDMATIM_UDMA33(drive); 2671 udma_reg |= CMD_UDMATIM_UDMA(drive); 2672 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 2673 CMD_UDMATIM_TIM_OFF(drive)); 2674 udma_reg |= 2675 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 2676 CMD_UDMATIM_TIM_OFF(drive)); 2677 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2678 CMD_UDMATIM(chp->channel), udma_reg); 2679 } else { 2680 /* 2681 * use Multiword DMA. 
2682 * Timings will be used for both PIO and DMA, 2683 * so adjust DMA mode if needed 2684 * if we have a 0646U2/8/9, turn off UDMA 2685 */ 2686 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2687 udma_reg = pciide_pci_read(sc->sc_pc, 2688 sc->sc_tag, 2689 CMD_UDMATIM(chp->channel)); 2690 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 2691 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2692 CMD_UDMATIM(chp->channel), 2693 udma_reg); 2694 } 2695 if (drvp->PIO_mode >= 3 && 2696 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 2697 drvp->DMA_mode = drvp->PIO_mode - 2; 2698 } 2699 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 2700 } 2701 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2702 } 2703 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2704 CMD_DATA_TIM(chp->channel, drive), tim); 2705 } 2706 if (idedma_ctl != 0) { 2707 /* Add software bits in status register */ 2708 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2709 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2710 idedma_ctl); 2711 } 2712 pciide_print_modes(cp); 2713 } 2714 2715 void 2716 cmd646_9_irqack(chp) 2717 struct channel_softc *chp; 2718 { 2719 u_int32_t priirq, secirq; 2720 struct pciide_channel *cp = (struct pciide_channel*)chp; 2721 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2722 2723 if (chp->channel == 0) { 2724 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2725 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 2726 } else { 2727 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2728 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 2729 } 2730 pciide_irqack(chp); 2731 } 2732 2733 void 2734 cmd680_chip_map(sc, pa) 2735 struct pciide_softc *sc; 2736 struct pci_attach_args *pa; 2737 { 2738 struct pciide_channel *cp; 2739 int channel; 2740 2741 if (pciide_chipen(sc, pa) == 0) 2742 return; 2743 printf("%s: bus-master DMA support present", 2744 sc->sc_wdcdev.sc_dev.dv_xname); 2745 pciide_mapreg_dma(sc, pa); 2746 printf("\n"); 2747 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2748 WDC_CAPABILITY_MODE; 2749 if (sc->sc_dma_ok) { 2750 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2751 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2752 sc->sc_wdcdev.UDMA_cap = 6; 2753 sc->sc_wdcdev.irqack = pciide_irqack; 2754 } 2755 2756 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2757 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2758 sc->sc_wdcdev.PIO_cap = 4; 2759 sc->sc_wdcdev.DMA_cap = 2; 2760 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 2761 2762 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 2763 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 2764 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 2765 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 2766 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2767 cp = &sc->pciide_channels[channel]; 2768 cmd680_channel_map(pa, sc, channel); 2769 if (cp->hw_ok == 0) 2770 continue; 2771 cmd680_setup_channel(&cp->wdc_channel); 2772 } 2773 } 2774 2775 void 2776 cmd680_channel_map(pa, sc, channel) 2777 struct pci_attach_args *pa; 2778 struct pciide_softc *sc; 2779 int channel; 2780 { 2781 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2782 bus_size_t cmdsize, ctlsize; 2783 int interface, i, reg; 2784 static const u_int8_t init_val[] = 2785 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 2786 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 2787 2788 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2789 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2790 
PCIIDE_INTERFACE_SETTABLE(1); 2791 interface |= PCIIDE_INTERFACE_PCI(0) | 2792 PCIIDE_INTERFACE_PCI(1); 2793 } else { 2794 interface = PCI_INTERFACE(pa->pa_class); 2795 } 2796 2797 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2798 cp->name = PCIIDE_CHANNEL_NAME(channel); 2799 cp->wdc_channel.channel = channel; 2800 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2801 2802 cp->wdc_channel.ch_queue = 2803 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2804 if (cp->wdc_channel.ch_queue == NULL) { 2805 printf("%s %s channel: " 2806 "can't allocate memory for command queue", 2807 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2808 return; 2809 } 2810 2811 /* XXX */ 2812 reg = 0xa2 + channel * 16; 2813 for (i = 0; i < sizeof(init_val); i++) 2814 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 2815 2816 printf("%s: %s channel %s to %s mode\n", 2817 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2818 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 2819 "configured" : "wired", 2820 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 2821 "native-PCI" : "compatibility"); 2822 2823 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 2824 if (cp->hw_ok == 0) 2825 return; 2826 pciide_map_compat_intr(pa, cp, channel, interface); 2827 } 2828 2829 void 2830 cmd680_setup_channel(chp) 2831 struct channel_softc *chp; 2832 { 2833 struct ata_drive_datas *drvp; 2834 u_int8_t mode, off, scsc; 2835 u_int16_t val; 2836 u_int32_t idedma_ctl; 2837 int drive; 2838 struct pciide_channel *cp = (struct pciide_channel*)chp; 2839 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2840 pci_chipset_tag_t pc = sc->sc_pc; 2841 pcitag_t pa = sc->sc_tag; 2842 static const u_int8_t udma2_tbl[] = 2843 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 2844 static const u_int8_t udma_tbl[] = 2845 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 2846 static const u_int16_t dma_tbl[] = 2847 { 0x2208, 0x10c2, 0x10c1 }; 2848 static const u_int16_t pio_tbl[] = 2849 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 2850 2851 idedma_ctl = 0; 2852 pciide_channel_dma_setup(cp); 2853 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 2854 2855 for (drive = 0; drive < 2; drive++) { 2856 drvp = &chp->ch_drive[drive]; 2857 /* If no drive, skip */ 2858 if ((drvp->drive_flags & DRIVE) == 0) 2859 continue; 2860 mode &= ~(0x03 << (drive * 4)); 2861 if (drvp->drive_flags & DRIVE_UDMA) { 2862 drvp->drive_flags &= ~DRIVE_DMA; 2863 off = 0xa0 + chp->channel * 16; 2864 if (drvp->UDMA_mode > 2 && 2865 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 2866 drvp->UDMA_mode = 2; 2867 scsc = pciide_pci_read(pc, pa, 0x8a); 2868 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 2869 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 2870 scsc = pciide_pci_read(pc, pa, 0x8a); 2871 if ((scsc & 0x30) == 0) 2872 drvp->UDMA_mode = 5; 2873 } 2874 mode |= 0x03 << (drive * 4); 2875 off = 0xac + chp->channel * 16 + drive * 2; 2876 val = pciide_pci_read(pc, pa, off) & ~0x3f; 2877 if (scsc & 0x30) 2878 val |= udma2_tbl[drvp->UDMA_mode]; 2879 else 2880 val |= udma_tbl[drvp->UDMA_mode]; 2881 pciide_pci_write(pc, pa, off, val); 2882 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2883 } else if (drvp->drive_flags & DRIVE_DMA) { 2884 mode |= 0x02 << (drive * 4); 2885 off = 0xa8 + chp->channel * 16 + drive * 2; 2886 val = dma_tbl[drvp->DMA_mode]; 2887 pciide_pci_write(pc, pa, off, val & 0xff); 2888 pciide_pci_write(pc, pa, off, val >> 8); 2889 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2890 } else { 2891 mode |= 0x01 << (drive * 4); 2892 off = 0xa4 + 
chp->channel * 16 + drive * 2; 2893 val = pio_tbl[drvp->PIO_mode]; 2894 pciide_pci_write(pc, pa, off, val & 0xff); 2895 pciide_pci_write(pc, pa, off, val >> 8); 2896 } 2897 } 2898 2899 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode); 2900 if (idedma_ctl != 0) { 2901 /* Add software bits in status register */ 2902 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2903 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2904 idedma_ctl); 2905 } 2906 pciide_print_modes(cp); 2907 } 2908 2909 void 2910 cy693_chip_map(sc, pa) 2911 struct pciide_softc *sc; 2912 struct pci_attach_args *pa; 2913 { 2914 struct pciide_channel *cp; 2915 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2916 bus_size_t cmdsize, ctlsize; 2917 2918 if (pciide_chipen(sc, pa) == 0) 2919 return; 2920 /* 2921 * this chip has 2 PCI IDE functions, one for primary and one for 2922 * secondary. So we need to call pciide_mapregs_compat() with 2923 * the real channel 2924 */ 2925 if (pa->pa_function == 1) { 2926 sc->sc_cy_compatchan = 0; 2927 } else if (pa->pa_function == 2) { 2928 sc->sc_cy_compatchan = 1; 2929 } else { 2930 printf("%s: unexpected PCI function %d\n", 2931 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2932 return; 2933 } 2934 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2935 printf("%s: bus-master DMA support present", 2936 sc->sc_wdcdev.sc_dev.dv_xname); 2937 pciide_mapreg_dma(sc, pa); 2938 } else { 2939 printf("%s: hardware does not support DMA", 2940 sc->sc_wdcdev.sc_dev.dv_xname); 2941 sc->sc_dma_ok = 0; 2942 } 2943 printf("\n"); 2944 2945 sc->sc_cy_handle = cy82c693_init(pa->pa_iot); 2946 if (sc->sc_cy_handle == NULL) { 2947 printf("%s: unable to map hyperCache control registers\n", 2948 sc->sc_wdcdev.sc_dev.dv_xname); 2949 sc->sc_dma_ok = 0; 2950 } 2951 2952 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2953 WDC_CAPABILITY_MODE; 2954 if (sc->sc_dma_ok) { 2955 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2956 sc->sc_wdcdev.irqack = pciide_irqack; 2957 } 2958 sc->sc_wdcdev.PIO_cap = 4; 2959 sc->sc_wdcdev.DMA_cap = 2; 2960 sc->sc_wdcdev.set_modes = cy693_setup_channel; 2961 2962 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2963 sc->sc_wdcdev.nchannels = 1; 2964 2965 /* Only one channel for this chip; if we are here it's enabled */ 2966 cp = &sc->pciide_channels[0]; 2967 sc->wdc_chanarray[0] = &cp->wdc_channel; 2968 cp->name = PCIIDE_CHANNEL_NAME(0); 2969 cp->wdc_channel.channel = 0; 2970 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2971 cp->wdc_channel.ch_queue = 2972 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2973 if (cp->wdc_channel.ch_queue == NULL) { 2974 printf("%s primary channel: " 2975 "can't allocate memory for command queue", 2976 sc->sc_wdcdev.sc_dev.dv_xname); 2977 return; 2978 } 2979 printf("%s: primary channel %s to ", 2980 sc->sc_wdcdev.sc_dev.dv_xname, 2981 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 
2982 "configured" : "wired"); 2983 if (interface & PCIIDE_INTERFACE_PCI(0)) { 2984 printf("native-PCI"); 2985 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 2986 pciide_pci_intr); 2987 } else { 2988 printf("compatibility"); 2989 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, 2990 &cmdsize, &ctlsize); 2991 } 2992 printf(" mode\n"); 2993 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2994 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2995 wdcattach(&cp->wdc_channel); 2996 if (pciide_chan_candisable(cp)) { 2997 pci_conf_write(sc->sc_pc, sc->sc_tag, 2998 PCI_COMMAND_STATUS_REG, 0); 2999 } 3000 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface); 3001 if (cp->hw_ok == 0) 3002 return; 3003 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 3004 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE); 3005 cy693_setup_channel(&cp->wdc_channel); 3006 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 3007 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 3008 } 3009 3010 void 3011 cy693_setup_channel(chp) 3012 struct channel_softc *chp; 3013 { 3014 struct ata_drive_datas *drvp; 3015 int drive; 3016 u_int32_t cy_cmd_ctrl; 3017 u_int32_t idedma_ctl; 3018 struct pciide_channel *cp = (struct pciide_channel*)chp; 3019 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3020 int dma_mode = -1; 3021 3022 cy_cmd_ctrl = idedma_ctl = 0; 3023 3024 /* setup DMA if needed */ 3025 pciide_channel_dma_setup(cp); 3026 3027 for (drive = 0; drive < 2; drive++) { 3028 drvp = &chp->ch_drive[drive]; 3029 /* If no drive, skip */ 3030 if ((drvp->drive_flags & DRIVE) == 0) 3031 continue; 3032 /* add timing values, setup DMA if needed */ 3033 if (drvp->drive_flags & DRIVE_DMA) { 3034 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3035 /* use Multiword DMA */ 3036 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 3037 dma_mode = drvp->DMA_mode; 3038 } 3039 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 3040 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 3041 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 3042 CY_CMD_CTRL_IOW_REC_OFF(drive)); 3043 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 3044 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 3045 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 3046 CY_CMD_CTRL_IOR_REC_OFF(drive)); 3047 } 3048 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 3049 chp->ch_drive[0].DMA_mode = dma_mode; 3050 chp->ch_drive[1].DMA_mode = dma_mode; 3051 3052 if (dma_mode == -1) 3053 dma_mode = 0; 3054 3055 if (sc->sc_cy_handle != NULL) { 3056 /* Note: `multiple' is implied. */ 3057 cy82c693_write(sc->sc_cy_handle, 3058 (sc->sc_cy_compatchan == 0) ? 
3059 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 3060 } 3061 3062 pciide_print_modes(cp); 3063 3064 if (idedma_ctl != 0) { 3065 /* Add software bits in status register */ 3066 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3067 IDEDMA_CTL, idedma_ctl); 3068 } 3069 } 3070 3071 static int 3072 sis_hostbr_match(pa) 3073 struct pci_attach_args *pa; 3074 { 3075 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) && 3076 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) || 3077 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) || 3078 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) || 3079 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735) || 3080 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_745))); 3081 } 3082 3083 void 3084 sis_chip_map(sc, pa) 3085 struct pciide_softc *sc; 3086 struct pci_attach_args *pa; 3087 { 3088 struct pciide_channel *cp; 3089 int channel; 3090 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 3091 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3092 pcireg_t rev = PCI_REVISION(pa->pa_class); 3093 bus_size_t cmdsize, ctlsize; 3094 pcitag_t pchb_tag; 3095 pcireg_t pchb_id, pchb_class; 3096 3097 if (pciide_chipen(sc, pa) == 0) 3098 return; 3099 printf("%s: bus-master DMA support present", 3100 sc->sc_wdcdev.sc_dev.dv_xname); 3101 pciide_mapreg_dma(sc, pa); 3102 printf("\n"); 3103 3104 /* get a PCI tag for the host bridge (function 0 of the same device) */ 3105 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 3106 /* and read ID and rev of the host bridge */ 3107 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG); 3108 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG); 3109 3110 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3111 WDC_CAPABILITY_MODE; 3112 if (sc->sc_dma_ok) { 3113 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3114 sc->sc_wdcdev.irqack = pciide_irqack; 3115 /* 3116 * controllers associated with a rev 0x2 530 Host to PCI Bridge 3117 * have problems with UDMA (info provided by Christos) 3118 */ 3119 if (rev >= 0xd0 && 3120 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB || 3121 PCI_REVISION(pchb_class) >= 0x03)) 3122 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3123 } 3124 3125 sc->sc_wdcdev.PIO_cap = 4; 3126 sc->sc_wdcdev.DMA_cap = 2; 3127 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) 3128 /* 3129 * Use UDMA/100 with the newer SiS host bridges (645/650/730/735/745), 3130 * UDMA/33 with other chipsets. 3131 */ 3132 sc->sc_wdcdev.UDMA_cap = 3133 pci_find_device(pa, sis_hostbr_match) ?
5 : 2; 3134 sc->sc_wdcdev.set_modes = sis_setup_channel; 3135 3136 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3137 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3138 3139 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 3140 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 3141 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE); 3142 3143 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3144 cp = &sc->pciide_channels[channel]; 3145 if (pciide_chansetup(sc, channel, interface) == 0) 3146 continue; 3147 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 3148 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 3149 printf("%s: %s channel ignored (disabled)\n", 3150 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3151 continue; 3152 } 3153 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3154 pciide_pci_intr); 3155 if (cp->hw_ok == 0) 3156 continue; 3157 if (pciide_chan_candisable(cp)) { 3158 if (channel == 0) 3159 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 3160 else 3161 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 3162 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 3163 sis_ctr0); 3164 } 3165 pciide_map_compat_intr(pa, cp, channel, interface); 3166 if (cp->hw_ok == 0) 3167 continue; 3168 sis_setup_channel(&cp->wdc_channel); 3169 } 3170 } 3171 3172 void 3173 sis_setup_channel(chp) 3174 struct channel_softc *chp; 3175 { 3176 struct ata_drive_datas *drvp; 3177 int drive; 3178 u_int32_t sis_tim; 3179 u_int32_t idedma_ctl; 3180 struct pciide_channel *cp = (struct pciide_channel*)chp; 3181 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3182 3183 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 3184 "channel %d 0x%x\n", chp->channel, 3185 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 3186 DEBUG_PROBE); 3187 sis_tim = 0; 3188 idedma_ctl = 0; 3189 /* setup DMA if needed */ 3190 pciide_channel_dma_setup(cp); 3191 3192 for (drive = 0; drive < 2; drive++) { 3193 drvp = &chp->ch_drive[drive]; 3194 /* If no drive, skip */ 3195 if ((drvp->drive_flags & DRIVE) == 0) 3196 continue; 3197 /* add timing values, setup DMA if needed */ 3198 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 3199 (drvp->drive_flags & DRIVE_UDMA) == 0) 3200 goto pio; 3201 3202 if (drvp->drive_flags & DRIVE_UDMA) { 3203 /* use Ultra/DMA */ 3204 drvp->drive_flags &= ~DRIVE_DMA; 3205 sis_tim |= sis_udma_tim[drvp->UDMA_mode] << 3206 SIS_TIM_UDMA_TIME_OFF(drive); 3207 sis_tim |= SIS_TIM_UDMA_EN(drive); 3208 } else { 3209 /* 3210 * use Multiword DMA 3211 * Timings will be used for both PIO and DMA, 3212 * so adjust DMA mode if needed 3213 */ 3214 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3215 drvp->PIO_mode = drvp->DMA_mode + 2; 3216 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3217 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
3218 drvp->PIO_mode - 2 : 0; 3219 if (drvp->DMA_mode == 0) 3220 drvp->PIO_mode = 0; 3221 } 3222 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3223 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] << 3224 SIS_TIM_ACT_OFF(drive); 3225 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 3226 SIS_TIM_REC_OFF(drive); 3227 } 3228 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 3229 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 3230 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 3231 if (idedma_ctl != 0) { 3232 /* Add software bits in status register */ 3233 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3234 IDEDMA_CTL, idedma_ctl); 3235 } 3236 pciide_print_modes(cp); 3237 } 3238 3239 void 3240 acer_chip_map(sc, pa) 3241 struct pciide_softc *sc; 3242 struct pci_attach_args *pa; 3243 { 3244 struct pciide_channel *cp; 3245 int channel; 3246 pcireg_t cr, interface; 3247 bus_size_t cmdsize, ctlsize; 3248 pcireg_t rev = PCI_REVISION(pa->pa_class); 3249 3250 if (pciide_chipen(sc, pa) == 0) 3251 return; 3252 printf("%s: bus-master DMA support present", 3253 sc->sc_wdcdev.sc_dev.dv_xname); 3254 pciide_mapreg_dma(sc, pa); 3255 printf("\n"); 3256 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3257 WDC_CAPABILITY_MODE; 3258 if (sc->sc_dma_ok) { 3259 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 3260 if (rev >= 0x20) { 3261 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3262 if (rev >= 0xC4) 3263 sc->sc_wdcdev.UDMA_cap = 5; 3264 else if (rev >= 0xC2) 3265 sc->sc_wdcdev.UDMA_cap = 4; 3266 else 3267 sc->sc_wdcdev.UDMA_cap = 2; 3268 } 3269 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3270 sc->sc_wdcdev.irqack = pciide_irqack; 3271 } 3272 3273 sc->sc_wdcdev.PIO_cap = 4; 3274 sc->sc_wdcdev.DMA_cap = 2; 3275 sc->sc_wdcdev.set_modes = acer_setup_channel; 3276 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3277 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3278 3279 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 3280 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 3281 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 3282 3283 /* Enable "microsoft register bits" R/W. 
*/ 3284 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 3285 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 3286 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 3287 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 3288 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 3289 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 3290 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 3291 ~ACER_CHANSTATUSREGS_RO); 3292 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 3293 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 3294 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 3295 /* Don't use cr, re-read the real register content instead */ 3296 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 3297 PCI_CLASS_REG)); 3298 3299 /* From linux: enable "Cable Detection" */ 3300 if (rev >= 0xC2) { 3301 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 3302 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 3303 | ACER_0x4B_CDETECT); 3304 } 3305 3306 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3307 cp = &sc->pciide_channels[channel]; 3308 if (pciide_chansetup(sc, channel, interface) == 0) 3309 continue; 3310 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 3311 printf("%s: %s channel ignored (disabled)\n", 3312 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3313 continue; 3314 } 3315 /* newer controllers seems to lack the ACER_CHIDS. Sigh */ 3316 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3317 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr); 3318 if (cp->hw_ok == 0) 3319 continue; 3320 if (pciide_chan_candisable(cp)) { 3321 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 3322 pci_conf_write(sc->sc_pc, sc->sc_tag, 3323 PCI_CLASS_REG, cr); 3324 } 3325 pciide_map_compat_intr(pa, cp, channel, interface); 3326 acer_setup_channel(&cp->wdc_channel); 3327 } 3328 } 3329 3330 void 3331 acer_setup_channel(chp) 3332 struct channel_softc *chp; 3333 { 3334 struct ata_drive_datas *drvp; 3335 int drive; 3336 u_int32_t acer_fifo_udma; 3337 u_int32_t idedma_ctl; 3338 struct pciide_channel *cp = (struct pciide_channel*)chp; 3339 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3340 3341 idedma_ctl = 0; 3342 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 3343 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 3344 acer_fifo_udma), DEBUG_PROBE); 3345 /* setup DMA if needed */ 3346 pciide_channel_dma_setup(cp); 3347 3348 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 3349 DRIVE_UDMA) { /* check 80 pins cable */ 3350 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 3351 ACER_0x4A_80PIN(chp->channel)) { 3352 if (chp->ch_drive[0].UDMA_mode > 2) 3353 chp->ch_drive[0].UDMA_mode = 2; 3354 if (chp->ch_drive[1].UDMA_mode > 2) 3355 chp->ch_drive[1].UDMA_mode = 2; 3356 } 3357 } 3358 3359 for (drive = 0; drive < 2; drive++) { 3360 drvp = &chp->ch_drive[drive]; 3361 /* If no drive, skip */ 3362 if ((drvp->drive_flags & DRIVE) == 0) 3363 continue; 3364 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 3365 "channel %d drive %d 0x%x\n", chp->channel, drive, 3366 pciide_pci_read(sc->sc_pc, sc->sc_tag, 3367 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 3368 /* clear FIFO/DMA mode */ 3369 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 3370 ACER_UDMA_EN(chp->channel, drive) | 3371 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 3372 3373 /* add timing values, setup DMA if needed */ 3374 if ((drvp->drive_flags & 
DRIVE_DMA) == 0 && 3375 (drvp->drive_flags & DRIVE_UDMA) == 0) { 3376 acer_fifo_udma |= 3377 ACER_FTH_OPL(chp->channel, drive, 0x1); 3378 goto pio; 3379 } 3380 3381 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 3382 if (drvp->drive_flags & DRIVE_UDMA) { 3383 /* use Ultra/DMA */ 3384 drvp->drive_flags &= ~DRIVE_DMA; 3385 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 3386 acer_fifo_udma |= 3387 ACER_UDMA_TIM(chp->channel, drive, 3388 acer_udma[drvp->UDMA_mode]); 3389 /* XXX disable if one drive < UDMA3 ? */ 3390 if (drvp->UDMA_mode >= 3) { 3391 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3392 ACER_0x4B, 3393 pciide_pci_read(sc->sc_pc, sc->sc_tag, 3394 ACER_0x4B) | ACER_0x4B_UDMA66); 3395 } 3396 } else { 3397 /* 3398 * use Multiword DMA 3399 * Timings will be used for both PIO and DMA, 3400 * so adjust DMA mode if needed 3401 */ 3402 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3403 drvp->PIO_mode = drvp->DMA_mode + 2; 3404 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3405 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 3406 drvp->PIO_mode - 2 : 0; 3407 if (drvp->DMA_mode == 0) 3408 drvp->PIO_mode = 0; 3409 } 3410 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3411 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 3412 ACER_IDETIM(chp->channel, drive), 3413 acer_pio[drvp->PIO_mode]); 3414 } 3415 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 3416 acer_fifo_udma), DEBUG_PROBE); 3417 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 3418 if (idedma_ctl != 0) { 3419 /* Add software bits in status register */ 3420 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3421 IDEDMA_CTL, idedma_ctl); 3422 } 3423 pciide_print_modes(cp); 3424 } 3425 3426 int 3427 acer_pci_intr(arg) 3428 void *arg; 3429 { 3430 struct pciide_softc *sc = arg; 3431 struct pciide_channel *cp; 3432 struct channel_softc *wdc_cp; 3433 int i, rv, crv; 3434 u_int32_t chids; 3435 3436 rv = 0; 3437 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 3438 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3439 cp = &sc->pciide_channels[i]; 3440 wdc_cp = &cp->wdc_channel; 3441 /* If a compat channel skip. 
*/ 3442 if (cp->compat) 3443 continue; 3444 if (chids & ACER_CHIDS_INT(i)) { 3445 crv = wdcintr(wdc_cp); 3446 if (crv == 0) 3447 printf("%s:%d: bogus intr\n", 3448 sc->sc_wdcdev.sc_dev.dv_xname, i); 3449 else 3450 rv = 1; 3451 } 3452 } 3453 return rv; 3454 } 3455 3456 void 3457 hpt_chip_map(sc, pa) 3458 struct pciide_softc *sc; 3459 struct pci_attach_args *pa; 3460 { 3461 struct pciide_channel *cp; 3462 int i, compatchan, revision; 3463 pcireg_t interface; 3464 bus_size_t cmdsize, ctlsize; 3465 3466 if (pciide_chipen(sc, pa) == 0) 3467 return; 3468 revision = PCI_REVISION(pa->pa_class); 3469 printf(": Triones/Highpoint "); 3470 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 3471 printf("HPT374 IDE Controller\n"); 3472 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372) 3473 printf("HPT372 IDE Controller\n"); 3474 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 3475 if (revision == HPT372_REV) 3476 printf("HPT372 IDE Controller\n"); 3477 else if (revision == HPT370_REV) 3478 printf("HPT370 IDE Controller\n"); 3479 else if (revision == HPT370A_REV) 3480 printf("HPT370A IDE Controller\n"); 3481 else if (revision == HPT366_REV) 3482 printf("HPT366 IDE Controller\n"); 3483 else 3484 printf("unknown HPT IDE controller rev %d\n", revision); 3485 } else 3486 printf("unknown HPT IDE controller 0x%x\n", 3487 sc->sc_pp->ide_product); 3488 3489 /* 3490 * when the chip is in native mode it identifies itself as a 3491 * 'misc mass storage'. Fake interface in this case. 3492 */ 3493 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3494 interface = PCI_INTERFACE(pa->pa_class); 3495 } else { 3496 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3497 PCIIDE_INTERFACE_PCI(0); 3498 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3499 (revision == HPT370_REV || revision == HPT370A_REV || 3500 revision == HPT372_REV)) || 3501 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 || 3502 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 3503 interface |= PCIIDE_INTERFACE_PCI(1); 3504 } 3505 3506 printf("%s: bus-master DMA support present", 3507 sc->sc_wdcdev.sc_dev.dv_xname); 3508 pciide_mapreg_dma(sc, pa); 3509 printf("\n"); 3510 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3511 WDC_CAPABILITY_MODE; 3512 if (sc->sc_dma_ok) { 3513 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3514 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3515 sc->sc_wdcdev.irqack = pciide_irqack; 3516 } 3517 sc->sc_wdcdev.PIO_cap = 4; 3518 sc->sc_wdcdev.DMA_cap = 2; 3519 3520 sc->sc_wdcdev.set_modes = hpt_setup_channel; 3521 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3522 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3523 revision == HPT366_REV) { 3524 sc->sc_wdcdev.UDMA_cap = 4; 3525 /* 3526 * The 366 has 2 PCI IDE functions, one for primary and one 3527 * for secondary. 
So we need to call pciide_mapregs_compat() 3528 * with the real channel 3529 */ 3530 if (pa->pa_function == 0) { 3531 compatchan = 0; 3532 } else if (pa->pa_function == 1) { 3533 compatchan = 1; 3534 } else { 3535 printf("%s: unexpected PCI function %d\n", 3536 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 3537 return; 3538 } 3539 sc->sc_wdcdev.nchannels = 1; 3540 } else { 3541 sc->sc_wdcdev.nchannels = 2; 3542 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 || 3543 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 || 3544 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3545 revision == HPT372_REV)) 3546 sc->sc_wdcdev.UDMA_cap = 6; 3547 else 3548 sc->sc_wdcdev.UDMA_cap = 5; 3549 } 3550 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3551 cp = &sc->pciide_channels[i]; 3552 if (sc->sc_wdcdev.nchannels > 1) { 3553 compatchan = i; 3554 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 3555 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 3556 printf("%s: %s channel ignored (disabled)\n", 3557 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3558 continue; 3559 } 3560 } 3561 if (pciide_chansetup(sc, i, interface) == 0) 3562 continue; 3563 if (interface & PCIIDE_INTERFACE_PCI(i)) { 3564 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 3565 &ctlsize, hpt_pci_intr); 3566 } else { 3567 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 3568 &cmdsize, &ctlsize); 3569 } 3570 if (cp->hw_ok == 0) 3571 return; 3572 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 3573 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 3574 wdcattach(&cp->wdc_channel); 3575 hpt_setup_channel(&cp->wdc_channel); 3576 } 3577 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3578 (revision == HPT370_REV || revision == HPT370A_REV || 3579 revision == HPT372_REV)) || 3580 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 || 3581 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 3582 /* 3583 * HPT370_REV and highter has a bit to disable interrupts, 3584 * make sure to clear it 3585 */ 3586 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 3587 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 3588 ~HPT_CSEL_IRQDIS); 3589 } 3590 /* set clocks, etc (mandatory on 372/4, optional otherwise) */ 3591 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 3592 revision == HPT372_REV ) || 3593 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 || 3594 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 3595 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 3596 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 3597 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 3598 return; 3599 } 3600 3601 void 3602 hpt_setup_channel(chp) 3603 struct channel_softc *chp; 3604 { 3605 struct ata_drive_datas *drvp; 3606 int drive; 3607 int cable; 3608 u_int32_t before, after; 3609 u_int32_t idedma_ctl; 3610 struct pciide_channel *cp = (struct pciide_channel*)chp; 3611 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3612 int revision = 3613 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 3614 3615 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 3616 3617 /* setup DMA if needed */ 3618 pciide_channel_dma_setup(cp); 3619 3620 idedma_ctl = 0; 3621 3622 /* Per drive settings */ 3623 for (drive = 0; drive < 2; drive++) { 3624 drvp = &chp->ch_drive[drive]; 3625 /* If no drive, skip */ 3626 if ((drvp->drive_flags & DRIVE) == 0) 3627 continue; 3628 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 3629 HPT_IDETIM(chp->channel, drive)); 3630 3631 /* add timing values, setup DMA if 
needed */ 3632 if (drvp->drive_flags & DRIVE_UDMA) { 3633 /* use Ultra/DMA */ 3634 drvp->drive_flags &= ~DRIVE_DMA; 3635 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 3636 drvp->UDMA_mode > 2) 3637 drvp->UDMA_mode = 2; 3638 switch (sc->sc_pp->ide_product) { 3639 case PCI_PRODUCT_TRIONES_HPT374: 3640 after = hpt374_udma[drvp->UDMA_mode]; 3641 break; 3642 case PCI_PRODUCT_TRIONES_HPT372: 3643 after = hpt372_udma[drvp->UDMA_mode]; 3644 break; 3645 case PCI_PRODUCT_TRIONES_HPT366: 3646 default: 3647 switch(revision) { 3648 case HPT372_REV: 3649 after = hpt372_udma[drvp->UDMA_mode]; 3650 break; 3651 case HPT370_REV: 3652 case HPT370A_REV: 3653 after = hpt370_udma[drvp->UDMA_mode]; 3654 break; 3655 case HPT366_REV: 3656 default: 3657 after = hpt366_udma[drvp->UDMA_mode]; 3658 break; 3659 } 3660 } 3661 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3662 } else if (drvp->drive_flags & DRIVE_DMA) { 3663 /* 3664 * use Multiword DMA. 3665 * Timings will be used for both PIO and DMA, so adjust 3666 * DMA mode if needed 3667 */ 3668 if (drvp->PIO_mode >= 3 && 3669 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3670 drvp->DMA_mode = drvp->PIO_mode - 2; 3671 } 3672 switch (sc->sc_pp->ide_product) { 3673 case PCI_PRODUCT_TRIONES_HPT374: 3674 after = hpt374_dma[drvp->DMA_mode]; 3675 break; 3676 case PCI_PRODUCT_TRIONES_HPT372: 3677 after = hpt372_dma[drvp->DMA_mode]; 3678 break; 3679 case PCI_PRODUCT_TRIONES_HPT366: 3680 default: 3681 switch(revision) { 3682 case HPT372_REV: 3683 after = hpt372_dma[drvp->DMA_mode]; 3684 break; 3685 case HPT370_REV: 3686 case HPT370A_REV: 3687 after = hpt370_dma[drvp->DMA_mode]; 3688 break; 3689 case HPT366_REV: 3690 default: 3691 after = hpt366_dma[drvp->DMA_mode]; 3692 break; 3693 } 3694 } 3695 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3696 } else { 3697 /* PIO only */ 3698 switch (sc->sc_pp->ide_product) { 3699 case PCI_PRODUCT_TRIONES_HPT374: 3700 after = hpt374_pio[drvp->PIO_mode]; 3701 break; 3702 case PCI_PRODUCT_TRIONES_HPT372: 3703 after = hpt372_pio[drvp->PIO_mode]; 3704 break; 3705 case PCI_PRODUCT_TRIONES_HPT366: 3706 default: 3707 switch(revision) { 3708 case HPT372_REV: 3709 after = hpt372_pio[drvp->PIO_mode]; 3710 break; 3711 case HPT370_REV: 3712 case HPT370A_REV: 3713 after = hpt370_pio[drvp->PIO_mode]; 3714 break; 3715 case HPT366_REV: 3716 default: 3717 after = hpt366_pio[drvp->PIO_mode]; 3718 break; 3719 } 3720 } 3721 } 3722 pci_conf_write(sc->sc_pc, sc->sc_tag, 3723 HPT_IDETIM(chp->channel, drive), after); 3724 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 3725 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname, 3726 after, before), DEBUG_PROBE); 3727 } 3728 if (idedma_ctl != 0) { 3729 /* Add software bits in status register */ 3730 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3731 IDEDMA_CTL, idedma_ctl); 3732 } 3733 pciide_print_modes(cp); 3734 } 3735 3736 int 3737 hpt_pci_intr(arg) 3738 void *arg; 3739 { 3740 struct pciide_softc *sc = arg; 3741 struct pciide_channel *cp; 3742 struct channel_softc *wdc_cp; 3743 int rv = 0; 3744 int dmastat, i, crv; 3745 3746 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3747 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3748 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 3749 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 3750 IDEDMA_CTL_INTR) 3751 continue; 3752 cp = &sc->pciide_channels[i]; 3753 wdc_cp = &cp->wdc_channel; 3754 crv = wdcintr(wdc_cp); 3755 if (crv == 0) { 3756 printf("%s:%d: bogus intr\n", 3757 sc->sc_wdcdev.sc_dev.dv_xname, i); 3758 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3759 
IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 3760 } else 3761 rv = 1; 3762 } 3763 return rv; 3764 } 3765 3766 3767 /* Macros to test product */ 3768 #define PDC_IS_262(sc) \ 3769 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \ 3770 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3771 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \ 3772 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \ 3773 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \ 3774 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \ 3775 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \ 3776 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2) 3777 #define PDC_IS_265(sc) \ 3778 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3779 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \ 3780 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \ 3781 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \ 3782 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \ 3783 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \ 3784 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2) 3785 #define PDC_IS_268(sc) \ 3786 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \ 3787 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \ 3788 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \ 3789 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \ 3790 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2) 3791 #define PDC_IS_276(sc) \ 3792 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \ 3793 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \ 3794 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2) 3795 3796 void 3797 pdc202xx_chip_map(sc, pa) 3798 struct pciide_softc *sc; 3799 struct pci_attach_args *pa; 3800 { 3801 struct pciide_channel *cp; 3802 int channel; 3803 pcireg_t interface, st, mode; 3804 bus_size_t cmdsize, ctlsize; 3805 3806 if (!PDC_IS_268(sc)) { 3807 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3808 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 3809 st), DEBUG_PROBE); 3810 } 3811 if (pciide_chipen(sc, pa) == 0) 3812 return; 3813 3814 /* turn off RAID mode */ 3815 if (!PDC_IS_268(sc)) 3816 st &= ~PDC2xx_STATE_IDERAID; 3817 3818 /* 3819 * can't rely on the PCI_CLASS_REG content if the chip was in raid 3820 * mode. We have to fake interface 3821 */ 3822 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 3823 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 3824 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3825 3826 printf("%s: bus-master DMA support present", 3827 sc->sc_wdcdev.sc_dev.dv_xname); 3828 pciide_mapreg_dma(sc, pa); 3829 printf("\n"); 3830 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3831 WDC_CAPABILITY_MODE; 3832 if (sc->sc_dma_ok) { 3833 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3834 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3835 sc->sc_wdcdev.irqack = pciide_irqack; 3836 } 3837 sc->sc_wdcdev.PIO_cap = 4; 3838 sc->sc_wdcdev.DMA_cap = 2; 3839 if (PDC_IS_276(sc)) 3840 sc->sc_wdcdev.UDMA_cap = 6; 3841 else if (PDC_IS_265(sc)) 3842 sc->sc_wdcdev.UDMA_cap = 5; 3843 else if (PDC_IS_262(sc)) 3844 sc->sc_wdcdev.UDMA_cap = 4; 3845 else 3846 sc->sc_wdcdev.UDMA_cap = 2; 3847 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
3848 pdc20268_setup_channel : pdc202xx_setup_channel; 3849 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3850 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3851 3852 if (!PDC_IS_268(sc)) { 3853 /* setup failsafe defaults */ 3854 mode = 0; 3855 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 3856 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 3857 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 3858 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 3859 for (channel = 0; 3860 channel < sc->sc_wdcdev.nchannels; 3861 channel++) { 3862 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 3863 "drive 0 initial timings 0x%x, now 0x%x\n", 3864 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 3865 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 3866 DEBUG_PROBE); 3867 pci_conf_write(sc->sc_pc, sc->sc_tag, 3868 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 3869 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 3870 "drive 1 initial timings 0x%x, now 0x%x\n", 3871 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 3872 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 3873 pci_conf_write(sc->sc_pc, sc->sc_tag, 3874 PDC2xx_TIM(channel, 1), mode); 3875 } 3876 3877 mode = PDC2xx_SCR_DMA; 3878 if (PDC_IS_262(sc)) { 3879 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 3880 } else { 3881 /* the BIOS set it up this way */ 3882 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 3883 } 3884 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 3885 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 3886 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 3887 "now 0x%x\n", 3888 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3889 PDC2xx_SCR), 3890 mode), DEBUG_PROBE); 3891 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3892 PDC2xx_SCR, mode); 3893 3894 /* controller initial state register is OK even without BIOS */ 3895 /* Set DMA mode to IDE DMA compatibility */ 3896 mode = 3897 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 3898 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 3899 DEBUG_PROBE); 3900 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 3901 mode | 0x1); 3902 mode = 3903 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 3904 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 3905 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 3906 mode | 0x1); 3907 } 3908 3909 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3910 cp = &sc->pciide_channels[channel]; 3911 if (pciide_chansetup(sc, channel, interface) == 0) 3912 continue; 3913 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 3914 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 3915 printf("%s: %s channel ignored (disabled)\n", 3916 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3917 continue; 3918 } 3919 if (PDC_IS_265(sc)) 3920 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3921 pdc20265_pci_intr); 3922 else 3923 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3924 pdc202xx_pci_intr); 3925 if (cp->hw_ok == 0) 3926 continue; 3927 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) 3928 st &= ~(PDC_IS_262(sc) ? 
3929 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 3930 pciide_map_compat_intr(pa, cp, channel, interface); 3931 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 3932 } 3933 if (!PDC_IS_268(sc)) { 3934 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 3935 "0x%x\n", st), DEBUG_PROBE); 3936 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 3937 } 3938 return; 3939 } 3940 3941 void 3942 pdc202xx_setup_channel(chp) 3943 struct channel_softc *chp; 3944 { 3945 struct ata_drive_datas *drvp; 3946 int drive; 3947 pcireg_t mode, st; 3948 u_int32_t idedma_ctl, scr, atapi; 3949 struct pciide_channel *cp = (struct pciide_channel*)chp; 3950 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3951 int channel = chp->channel; 3952 3953 /* setup DMA if needed */ 3954 pciide_channel_dma_setup(cp); 3955 3956 idedma_ctl = 0; 3957 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 3958 sc->sc_wdcdev.sc_dev.dv_xname, 3959 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 3960 DEBUG_PROBE); 3961 3962 /* Per channel settings */ 3963 if (PDC_IS_262(sc)) { 3964 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3965 PDC262_U66); 3966 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3967 /* Trim UDMA mode */ 3968 if ((st & PDC262_STATE_80P(channel)) != 0 || 3969 (chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3970 chp->ch_drive[0].UDMA_mode <= 2) || 3971 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3972 chp->ch_drive[1].UDMA_mode <= 2)) { 3973 if (chp->ch_drive[0].UDMA_mode > 2) 3974 chp->ch_drive[0].UDMA_mode = 2; 3975 if (chp->ch_drive[1].UDMA_mode > 2) 3976 chp->ch_drive[1].UDMA_mode = 2; 3977 } 3978 /* Set U66 if needed */ 3979 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3980 chp->ch_drive[0].UDMA_mode > 2) || 3981 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3982 chp->ch_drive[1].UDMA_mode > 2)) 3983 scr |= PDC262_U66_EN(channel); 3984 else 3985 scr &= ~PDC262_U66_EN(channel); 3986 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3987 PDC262_U66, scr); 3988 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 3989 sc->sc_wdcdev.sc_dev.dv_xname, channel, 3990 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3991 PDC262_ATAPI(channel))), DEBUG_PROBE); 3992 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 3993 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 3994 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3995 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3996 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 3997 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3998 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3999 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 4000 atapi = 0; 4001 else 4002 atapi = PDC262_ATAPI_UDMA; 4003 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 4004 PDC262_ATAPI(channel), atapi); 4005 } 4006 } 4007 for (drive = 0; drive < 2; drive++) { 4008 drvp = &chp->ch_drive[drive]; 4009 /* If no drive, skip */ 4010 if ((drvp->drive_flags & DRIVE) == 0) 4011 continue; 4012 mode = 0; 4013 if (drvp->drive_flags & DRIVE_UDMA) { 4014 /* use Ultra/DMA */ 4015 drvp->drive_flags &= ~DRIVE_DMA; 4016 mode = PDC2xx_TIM_SET_MB(mode, 4017 pdc2xx_udma_mb[drvp->UDMA_mode]); 4018 mode = PDC2xx_TIM_SET_MC(mode, 4019 pdc2xx_udma_mc[drvp->UDMA_mode]); 4020 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4021 } else if (drvp->drive_flags & DRIVE_DMA) { 4022 mode = PDC2xx_TIM_SET_MB(mode, 4023 pdc2xx_dma_mb[drvp->DMA_mode]); 4024 mode = PDC2xx_TIM_SET_MC(mode, 4025 pdc2xx_dma_mc[drvp->DMA_mode]); 4026 idedma_ctl |= 
IDEDMA_CTL_DRV_DMA(drive); 4027 } else { 4028 mode = PDC2xx_TIM_SET_MB(mode, 4029 pdc2xx_dma_mb[0]); 4030 mode = PDC2xx_TIM_SET_MC(mode, 4031 pdc2xx_dma_mc[0]); 4032 } 4033 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 4034 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 4035 if (drvp->drive_flags & DRIVE_ATA) 4036 mode |= PDC2xx_TIM_PRE; 4037 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 4038 if (drvp->PIO_mode >= 3) { 4039 mode |= PDC2xx_TIM_IORDY; 4040 if (drive == 0) 4041 mode |= PDC2xx_TIM_IORDYp; 4042 } 4043 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 4044 "timings 0x%x\n", 4045 sc->sc_wdcdev.sc_dev.dv_xname, 4046 chp->channel, drive, mode), DEBUG_PROBE); 4047 pci_conf_write(sc->sc_pc, sc->sc_tag, 4048 PDC2xx_TIM(chp->channel, drive), mode); 4049 } 4050 if (idedma_ctl != 0) { 4051 /* Add software bits in status register */ 4052 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4053 IDEDMA_CTL, idedma_ctl); 4054 } 4055 pciide_print_modes(cp); 4056 } 4057 4058 void 4059 pdc20268_setup_channel(chp) 4060 struct channel_softc *chp; 4061 { 4062 struct ata_drive_datas *drvp; 4063 int drive; 4064 u_int32_t idedma_ctl; 4065 struct pciide_channel *cp = (struct pciide_channel*)chp; 4066 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4067 int u100; 4068 4069 /* setup DMA if needed */ 4070 pciide_channel_dma_setup(cp); 4071 4072 idedma_ctl = 0; 4073 4074 /* I don't know what this is for, FreeBSD does it ... */ 4075 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4076 IDEDMA_CMD + 0x1, 0x0b); 4077 4078 /* 4079 * I don't know what this is for; FreeBSD checks this ... this is not 4080 * cable type detect. 4081 */ 4082 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4083 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1; 4084 4085 for (drive = 0; drive < 2; drive++) { 4086 drvp = &chp->ch_drive[drive]; 4087 /* If no drive, skip */ 4088 if ((drvp->drive_flags & DRIVE) == 0) 4089 continue; 4090 if (drvp->drive_flags & DRIVE_UDMA) { 4091 /* use Ultra/DMA */ 4092 drvp->drive_flags &= ~DRIVE_DMA; 4093 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4094 if (drvp->UDMA_mode > 2 && u100 == 0) 4095 drvp->UDMA_mode = 2; 4096 } else if (drvp->drive_flags & DRIVE_DMA) { 4097 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4098 } 4099 } 4100 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */ 4101 if (idedma_ctl != 0) { 4102 /* Add software bits in status register */ 4103 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4104 IDEDMA_CTL, idedma_ctl); 4105 } 4106 pciide_print_modes(cp); 4107 } 4108 4109 int 4110 pdc202xx_pci_intr(arg) 4111 void *arg; 4112 { 4113 struct pciide_softc *sc = arg; 4114 struct pciide_channel *cp; 4115 struct channel_softc *wdc_cp; 4116 int i, rv, crv; 4117 u_int32_t scr; 4118 4119 rv = 0; 4120 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 4121 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4122 cp = &sc->pciide_channels[i]; 4123 wdc_cp = &cp->wdc_channel; 4124 /* If a compat channel skip. 
*/ 4125 if (cp->compat) 4126 continue; 4127 if (scr & PDC2xx_SCR_INT(i)) { 4128 crv = wdcintr(wdc_cp); 4129 if (crv == 0) 4130 printf("%s:%d: bogus intr (reg 0x%x)\n", 4131 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 4132 else 4133 rv = 1; 4134 } 4135 } 4136 return rv; 4137 } 4138 4139 int 4140 pdc20265_pci_intr(arg) 4141 void *arg; 4142 { 4143 struct pciide_softc *sc = arg; 4144 struct pciide_channel *cp; 4145 struct channel_softc *wdc_cp; 4146 int i, rv, crv; 4147 u_int32_t dmastat; 4148 4149 rv = 0; 4150 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4151 cp = &sc->pciide_channels[i]; 4152 wdc_cp = &cp->wdc_channel; 4153 /* If a compat channel skip. */ 4154 if (cp->compat) 4155 continue; 4156 /* 4157 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 4158 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 4159 * So use it instead (requires 2 reg reads instead of 1, 4160 * but we can't do it another way). 4161 */ 4162 dmastat = bus_space_read_1(sc->sc_dma_iot, 4163 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 4164 if((dmastat & IDEDMA_CTL_INTR) == 0) 4165 continue; 4166 crv = wdcintr(wdc_cp); 4167 if (crv == 0) 4168 printf("%s:%d: bogus intr\n", 4169 sc->sc_wdcdev.sc_dev.dv_xname, i); 4170 else 4171 rv = 1; 4172 } 4173 return rv; 4174 } 4175 4176 void 4177 opti_chip_map(sc, pa) 4178 struct pciide_softc *sc; 4179 struct pci_attach_args *pa; 4180 { 4181 struct pciide_channel *cp; 4182 bus_size_t cmdsize, ctlsize; 4183 pcireg_t interface; 4184 u_int8_t init_ctrl; 4185 int channel; 4186 4187 if (pciide_chipen(sc, pa) == 0) 4188 return; 4189 printf("%s: bus-master DMA support present", 4190 sc->sc_wdcdev.sc_dev.dv_xname); 4191 4192 /* 4193 * XXXSCW: 4194 * There seem to be a couple of buggy revisions/implementations 4195 * of the OPTi pciide chipset. This kludge seems to fix one of 4196 * the reported problems (PR/11644) but still fails for the 4197 * other (PR/13151), although the latter may be due to other 4198 * issues too... 4199 */ 4200 if (PCI_REVISION(pa->pa_class) <= 0x12) { 4201 printf(" but disabled due to chip rev. 
<= 0x12"); 4202 sc->sc_dma_ok = 0; 4203 } else 4204 pciide_mapreg_dma(sc, pa); 4205 4206 printf("\n"); 4207 4208 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 | 4209 WDC_CAPABILITY_MODE; 4210 sc->sc_wdcdev.PIO_cap = 4; 4211 if (sc->sc_dma_ok) { 4212 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4213 sc->sc_wdcdev.irqack = pciide_irqack; 4214 sc->sc_wdcdev.DMA_cap = 2; 4215 } 4216 sc->sc_wdcdev.set_modes = opti_setup_channel; 4217 4218 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4219 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4220 4221 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 4222 OPTI_REG_INIT_CONTROL); 4223 4224 interface = PCI_INTERFACE(pa->pa_class); 4225 4226 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4227 cp = &sc->pciide_channels[channel]; 4228 if (pciide_chansetup(sc, channel, interface) == 0) 4229 continue; 4230 if (channel == 1 && 4231 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 4232 printf("%s: %s channel ignored (disabled)\n", 4233 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4234 continue; 4235 } 4236 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4237 pciide_pci_intr); 4238 if (cp->hw_ok == 0) 4239 continue; 4240 pciide_map_compat_intr(pa, cp, channel, interface); 4241 if (cp->hw_ok == 0) 4242 continue; 4243 opti_setup_channel(&cp->wdc_channel); 4244 } 4245 } 4246 4247 void 4248 opti_setup_channel(chp) 4249 struct channel_softc *chp; 4250 { 4251 struct ata_drive_datas *drvp; 4252 struct pciide_channel *cp = (struct pciide_channel*)chp; 4253 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4254 int drive, spd; 4255 int mode[2]; 4256 u_int8_t rv, mr; 4257 4258 /* 4259 * The `Delay' and `Address Setup Time' fields of the 4260 * Miscellaneous Register are always zero initially. 4261 */ 4262 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 4263 mr &= ~(OPTI_MISC_DELAY_MASK | 4264 OPTI_MISC_ADDR_SETUP_MASK | 4265 OPTI_MISC_INDEX_MASK); 4266 4267 /* Prime the control register before setting timing values */ 4268 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 4269 4270 /* Determine the clockrate of the PCIbus the chip is attached to */ 4271 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 4272 spd &= OPTI_STRAP_PCI_SPEED_MASK; 4273 4274 /* setup DMA if needed */ 4275 pciide_channel_dma_setup(cp); 4276 4277 for (drive = 0; drive < 2; drive++) { 4278 drvp = &chp->ch_drive[drive]; 4279 /* If no drive, skip */ 4280 if ((drvp->drive_flags & DRIVE) == 0) { 4281 mode[drive] = -1; 4282 continue; 4283 } 4284 4285 if ((drvp->drive_flags & DRIVE_DMA)) { 4286 /* 4287 * Timings will be used for both PIO and DMA, 4288 * so adjust DMA mode if needed 4289 */ 4290 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 4291 drvp->PIO_mode = drvp->DMA_mode + 2; 4292 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 4293 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 4294 drvp->PIO_mode - 2 : 0; 4295 if (drvp->DMA_mode == 0) 4296 drvp->PIO_mode = 0; 4297 4298 mode[drive] = drvp->DMA_mode + 5; 4299 } else 4300 mode[drive] = drvp->PIO_mode; 4301 4302 if (drive && mode[0] >= 0 && 4303 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 4304 /* 4305 * Can't have two drives using different values 4306 * for `Address Setup Time'. 4307 * Slow down the faster drive to compensate. 4308 */ 4309 int d = (opti_tim_as[spd][mode[0]] > 4310 opti_tim_as[spd][mode[1]]) ? 
0 : 1; 4311 4312 mode[d] = mode[1-d]; 4313 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 4314 chp->ch_drive[d].DMA_mode = 0; 4315 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA; 4316 } 4317 } 4318 4319 for (drive = 0; drive < 2; drive++) { 4320 int m; 4321 if ((m = mode[drive]) < 0) 4322 continue; 4323 4324 /* Set the Address Setup Time and select appropriate index */ 4325 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 4326 rv |= OPTI_MISC_INDEX(drive); 4327 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 4328 4329 /* Set the pulse width and recovery timing parameters */ 4330 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 4331 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 4332 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 4333 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 4334 4335 /* Set the Enhanced Mode register appropriately */ 4336 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 4337 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 4338 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 4339 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 4340 } 4341 4342 /* Finally, enable the timings */ 4343 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 4344 4345 pciide_print_modes(cp); 4346 } 4347 4348 #define ACARD_IS_850(sc) \ 4349 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 4350 4351 void 4352 acard_chip_map(sc, pa) 4353 struct pciide_softc *sc; 4354 struct pci_attach_args *pa; 4355 { 4356 struct pciide_channel *cp; 4357 int i; 4358 pcireg_t interface; 4359 bus_size_t cmdsize, ctlsize; 4360 4361 if (pciide_chipen(sc, pa) == 0) 4362 return; 4363 4364 /* 4365 * when the chip is in native mode it identifies itself as a 4366 * 'misc mass storage'. Fake interface in this case. 4367 */ 4368 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4369 interface = PCI_INTERFACE(pa->pa_class); 4370 } else { 4371 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4372 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4373 } 4374 4375 printf("%s: bus-master DMA support present", 4376 sc->sc_wdcdev.sc_dev.dv_xname); 4377 pciide_mapreg_dma(sc, pa); 4378 printf("\n"); 4379 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4380 WDC_CAPABILITY_MODE; 4381 4382 if (sc->sc_dma_ok) { 4383 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4384 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4385 sc->sc_wdcdev.irqack = pciide_irqack; 4386 } 4387 sc->sc_wdcdev.PIO_cap = 4; 4388 sc->sc_wdcdev.DMA_cap = 2; 4389 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 
2 : 4; 4390 4391 sc->sc_wdcdev.set_modes = acard_setup_channel; 4392 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4393 sc->sc_wdcdev.nchannels = 2; 4394 4395 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4396 cp = &sc->pciide_channels[i]; 4397 if (pciide_chansetup(sc, i, interface) == 0) 4398 continue; 4399 if (interface & PCIIDE_INTERFACE_PCI(i)) { 4400 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 4401 &ctlsize, pciide_pci_intr); 4402 } else { 4403 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 4404 &cmdsize, &ctlsize); 4405 } 4406 if (cp->hw_ok == 0) 4407 return; 4408 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4409 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4410 wdcattach(&cp->wdc_channel); 4411 acard_setup_channel(&cp->wdc_channel); 4412 } 4413 if (!ACARD_IS_850(sc)) { 4414 u_int32_t reg; 4415 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 4416 reg &= ~ATP860_CTRL_INT; 4417 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 4418 } 4419 } 4420 4421 void 4422 acard_setup_channel(chp) 4423 struct channel_softc *chp; 4424 { 4425 struct ata_drive_datas *drvp; 4426 struct pciide_channel *cp = (struct pciide_channel*)chp; 4427 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4428 int channel = chp->channel; 4429 int drive; 4430 u_int32_t idetime, udma_mode; 4431 u_int32_t idedma_ctl; 4432 4433 /* setup DMA if needed */ 4434 pciide_channel_dma_setup(cp); 4435 4436 if (ACARD_IS_850(sc)) { 4437 idetime = 0; 4438 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 4439 udma_mode &= ~ATP850_UDMA_MASK(channel); 4440 } else { 4441 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 4442 idetime &= ~ATP860_SETTIME_MASK(channel); 4443 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 4444 udma_mode &= ~ATP860_UDMA_MASK(channel); 4445 4446 /* check 80 pins cable */ 4447 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) || 4448 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 4449 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 4450 & ATP860_CTRL_80P(chp->channel)) { 4451 if (chp->ch_drive[0].UDMA_mode > 2) 4452 chp->ch_drive[0].UDMA_mode = 2; 4453 if (chp->ch_drive[1].UDMA_mode > 2) 4454 chp->ch_drive[1].UDMA_mode = 2; 4455 } 4456 } 4457 } 4458 4459 idedma_ctl = 0; 4460 4461 /* Per drive settings */ 4462 for (drive = 0; drive < 2; drive++) { 4463 drvp = &chp->ch_drive[drive]; 4464 /* If no drive, skip */ 4465 if ((drvp->drive_flags & DRIVE) == 0) 4466 continue; 4467 /* add timing values, setup DMA if needed */ 4468 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 4469 (drvp->drive_flags & DRIVE_UDMA)) { 4470 /* use Ultra/DMA */ 4471 if (ACARD_IS_850(sc)) { 4472 idetime |= ATP850_SETTIME(drive, 4473 acard_act_udma[drvp->UDMA_mode], 4474 acard_rec_udma[drvp->UDMA_mode]); 4475 udma_mode |= ATP850_UDMA_MODE(channel, drive, 4476 acard_udma_conf[drvp->UDMA_mode]); 4477 } else { 4478 idetime |= ATP860_SETTIME(channel, drive, 4479 acard_act_udma[drvp->UDMA_mode], 4480 acard_rec_udma[drvp->UDMA_mode]); 4481 udma_mode |= ATP860_UDMA_MODE(channel, drive, 4482 acard_udma_conf[drvp->UDMA_mode]); 4483 } 4484 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4485 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 4486 (drvp->drive_flags & DRIVE_DMA)) { 4487 /* use Multiword DMA */ 4488 drvp->drive_flags &= ~DRIVE_UDMA; 4489 if (ACARD_IS_850(sc)) { 4490 idetime |= ATP850_SETTIME(drive, 4491 acard_act_dma[drvp->DMA_mode], 4492 acard_rec_dma[drvp->DMA_mode]); 4493 } else { 4494 idetime |= ATP860_SETTIME(channel, drive, 4495 
acard_act_dma[drvp->DMA_mode], 4496 acard_rec_dma[drvp->DMA_mode]); 4497 } 4498 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4499 } else { 4500 /* PIO only */ 4501 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 4502 if (ACARD_IS_850(sc)) { 4503 idetime |= ATP850_SETTIME(drive, 4504 acard_act_pio[drvp->PIO_mode], 4505 acard_rec_pio[drvp->PIO_mode]); 4506 } else { 4507 idetime |= ATP860_SETTIME(channel, drive, 4508 acard_act_pio[drvp->PIO_mode], 4509 acard_rec_pio[drvp->PIO_mode]); 4510 } 4511 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 4512 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 4513 | ATP8x0_CTRL_EN(channel)); 4514 } 4515 } 4516 4517 if (idedma_ctl != 0) { 4518 /* Add software bits in status register */ 4519 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4520 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl); 4521 } 4522 pciide_print_modes(cp); 4523 4524 if (ACARD_IS_850(sc)) { 4525 pci_conf_write(sc->sc_pc, sc->sc_tag, 4526 ATP850_IDETIME(channel), idetime); 4527 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 4528 } else { 4529 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 4530 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 4531 } 4532 } 4533 4534 int 4535 acard_pci_intr(arg) 4536 void *arg; 4537 { 4538 struct pciide_softc *sc = arg; 4539 struct pciide_channel *cp; 4540 struct channel_softc *wdc_cp; 4541 int rv = 0; 4542 int dmastat, i, crv; 4543 4544 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4545 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4546 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 4547 if ((dmastat & IDEDMA_CTL_INTR) == 0) 4548 continue; 4549 cp = &sc->pciide_channels[i]; 4550 wdc_cp = &cp->wdc_channel; 4551 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) { 4552 (void)wdcintr(wdc_cp); 4553 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4554 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 4555 continue; 4556 } 4557 crv = wdcintr(wdc_cp); 4558 if (crv == 0) 4559 printf("%s:%d: bogus intr\n", 4560 sc->sc_wdcdev.sc_dev.dv_xname, i); 4561 else if (crv == 1) 4562 rv = 1; 4563 else if (rv == 0) 4564 rv = crv; 4565 } 4566 return rv; 4567 } 4568 4569 static int 4570 sl82c105_bugchk(struct pci_attach_args *pa) 4571 { 4572 4573 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND || 4574 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0) 4575 return (0); 4576 4577 if (PCI_REVISION(pa->pa_class) <= 0x05) 4578 return (1); 4579 4580 return (0); 4581 } 4582 4583 void 4584 sl82c105_chip_map(sc, pa) 4585 struct pciide_softc *sc; 4586 struct pci_attach_args *pa; 4587 { 4588 struct pciide_channel *cp; 4589 bus_size_t cmdsize, ctlsize; 4590 pcireg_t interface, idecr; 4591 int channel; 4592 4593 if (pciide_chipen(sc, pa) == 0) 4594 return; 4595 4596 printf("%s: bus-master DMA support present", 4597 sc->sc_wdcdev.sc_dev.dv_xname); 4598 4599 /* 4600 * Check to see if we're part of the Winbond 83c553 Southbridge. 4601 * If so, we need to disable DMA on rev. <= 5 of that chip. 4602 */ 4603 if (pci_find_device(pa, sl82c105_bugchk)) { 4604 printf(" but disabled due to 83c553 rev. 
<= 0x05"); 4605 sc->sc_dma_ok = 0; 4606 } else 4607 pciide_mapreg_dma(sc, pa); 4608 printf("\n"); 4609 4610 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 | 4611 WDC_CAPABILITY_MODE; 4612 sc->sc_wdcdev.PIO_cap = 4; 4613 if (sc->sc_dma_ok) { 4614 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4615 sc->sc_wdcdev.irqack = pciide_irqack; 4616 sc->sc_wdcdev.DMA_cap = 2; 4617 } 4618 sc->sc_wdcdev.set_modes = sl82c105_setup_channel; 4619 4620 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4621 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4622 4623 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR); 4624 4625 interface = PCI_INTERFACE(pa->pa_class); 4626 4627 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4628 cp = &sc->pciide_channels[channel]; 4629 if (pciide_chansetup(sc, channel, interface) == 0) 4630 continue; 4631 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) || 4632 (channel == 1 && (idecr & IDECR_P1EN) == 0)) { 4633 printf("%s: %s channel ignored (disabled)\n", 4634 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4635 continue; 4636 } 4637 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4638 pciide_pci_intr); 4639 if (cp->hw_ok == 0) 4640 continue; 4641 pciide_map_compat_intr(pa, cp, channel, interface); 4642 if (cp->hw_ok == 0) 4643 continue; 4644 sl82c105_setup_channel(&cp->wdc_channel); 4645 } 4646 } 4647 4648 void 4649 sl82c105_setup_channel(chp) 4650 struct channel_softc *chp; 4651 { 4652 struct ata_drive_datas *drvp; 4653 struct pciide_channel *cp = (struct pciide_channel*)chp; 4654 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4655 int pxdx_reg, drive; 4656 pcireg_t pxdx; 4657 4658 /* Set up DMA if needed. */ 4659 pciide_channel_dma_setup(cp); 4660 4661 for (drive = 0; drive < 2; drive++) { 4662 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR 4663 : SYMPH_P1D0CR) + (drive * 4); 4664 4665 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg); 4666 4667 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK); 4668 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN); 4669 4670 drvp = &chp->ch_drive[drive]; 4671 /* If no drive, skip. */ 4672 if ((drvp->drive_flags & DRIVE) == 0) { 4673 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx); 4674 continue; 4675 } 4676 4677 if (drvp->drive_flags & DRIVE_DMA) { 4678 /* 4679 * Timings will be used for both PIO and DMA, 4680 * so adjust DMA mode if needed. 4681 */ 4682 if (drvp->PIO_mode >= 3) { 4683 if ((drvp->DMA_mode + 2) > drvp->PIO_mode) 4684 drvp->DMA_mode = drvp->PIO_mode - 2; 4685 if (drvp->DMA_mode < 1) { 4686 /* 4687 * Can't mix both PIO and DMA. 4688 * Disable DMA. 4689 */ 4690 drvp->drive_flags &= ~DRIVE_DMA; 4691 } 4692 } else { 4693 /* 4694 * Can't mix both PIO and DMA. Disable 4695 * DMA. 4696 */ 4697 drvp->drive_flags &= ~DRIVE_DMA; 4698 } 4699 } 4700 4701 if (drvp->drive_flags & DRIVE_DMA) { 4702 /* Use multi-word DMA. */ 4703 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on << 4704 PxDx_CMD_ON_SHIFT; 4705 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off; 4706 } else { 4707 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on << 4708 PxDx_CMD_ON_SHIFT; 4709 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off; 4710 } 4711 4712 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */ 4713 4714 /* ...and set the mode for this drive. 
*/ 4715 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx); 4716 } 4717 4718 pciide_print_modes(cp); 4719 } 4720 4721 void 4722 serverworks_chip_map(sc, pa) 4723 struct pciide_softc *sc; 4724 struct pci_attach_args *pa; 4725 { 4726 struct pciide_channel *cp; 4727 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4728 pcitag_t pcib_tag; 4729 int channel; 4730 bus_size_t cmdsize, ctlsize; 4731 4732 if (pciide_chipen(sc, pa) == 0) 4733 return; 4734 4735 printf("%s: bus-master DMA support present", 4736 sc->sc_wdcdev.sc_dev.dv_xname); 4737 pciide_mapreg_dma(sc, pa); 4738 printf("\n"); 4739 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4740 WDC_CAPABILITY_MODE; 4741 4742 if (sc->sc_dma_ok) { 4743 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4744 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4745 sc->sc_wdcdev.irqack = pciide_irqack; 4746 } 4747 sc->sc_wdcdev.PIO_cap = 4; 4748 sc->sc_wdcdev.DMA_cap = 2; 4749 switch (sc->sc_pp->ide_product) { 4750 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE: 4751 sc->sc_wdcdev.UDMA_cap = 2; 4752 break; 4753 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE: 4754 if (PCI_REVISION(pa->pa_class) < 0x92) 4755 sc->sc_wdcdev.UDMA_cap = 4; 4756 else 4757 sc->sc_wdcdev.UDMA_cap = 5; 4758 break; 4759 } 4760 4761 sc->sc_wdcdev.set_modes = serverworks_setup_channel; 4762 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4763 sc->sc_wdcdev.nchannels = 2; 4764 4765 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4766 cp = &sc->pciide_channels[channel]; 4767 if (pciide_chansetup(sc, channel, interface) == 0) 4768 continue; 4769 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4770 serverworks_pci_intr); 4771 if (cp->hw_ok == 0) 4772 return; 4773 pciide_map_compat_intr(pa, cp, channel, interface); 4774 if (cp->hw_ok == 0) 4775 return; 4776 serverworks_setup_channel(&cp->wdc_channel); 4777 } 4778 4779 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 4780 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 4781 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 4782 } 4783 4784 void 4785 serverworks_setup_channel(chp) 4786 struct channel_softc *chp; 4787 { 4788 struct ata_drive_datas *drvp; 4789 struct pciide_channel *cp = (struct pciide_channel*)chp; 4790 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4791 int channel = chp->channel; 4792 int drive, unit; 4793 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 4794 u_int32_t idedma_ctl; 4795 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 4796 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 4797 4798 /* setup DMA if needed */ 4799 pciide_channel_dma_setup(cp); 4800 4801 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 4802 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 4803 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 4804 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 4805 4806 pio_time &= ~(0xffff << (16 * channel)); 4807 dma_time &= ~(0xffff << (16 * channel)); 4808 pio_mode &= ~(0xff << (8 * channel + 16)); 4809 udma_mode &= ~(0xff << (8 * channel + 16)); 4810 udma_mode &= ~(3 << (2 * channel)); 4811 4812 idedma_ctl = 0; 4813 4814 /* Per drive settings */ 4815 for (drive = 0; drive < 2; drive++) { 4816 drvp = &chp->ch_drive[drive]; 4817 /* If no drive, skip */ 4818 if ((drvp->drive_flags & DRIVE) == 0) 4819 continue; 4820 unit = drive + 2 * channel; 4821 /* add timing values, setup DMA if needed */ 4822 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 4823 pio_mode |= 
drvp->PIO_mode << (4 * unit + 16); 4824 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 4825 (drvp->drive_flags & DRIVE_UDMA)) { 4826 /* use Ultra/DMA, check for 80-pin cable */ 4827 if (drvp->UDMA_mode > 2 && 4828 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0) 4829 drvp->UDMA_mode = 2; 4830 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 4831 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 4832 udma_mode |= 1 << unit; 4833 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4834 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 4835 (drvp->drive_flags & DRIVE_DMA)) { 4836 /* use Multiword DMA */ 4837 drvp->drive_flags &= ~DRIVE_UDMA; 4838 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 4839 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4840 } else { 4841 /* PIO only */ 4842 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 4843 } 4844 } 4845 4846 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 4847 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 4848 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE) 4849 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 4850 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 4851 4852 if (idedma_ctl != 0) { 4853 /* Add software bits in status register */ 4854 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4855 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl); 4856 } 4857 pciide_print_modes(cp); 4858 } 4859 4860 int 4861 serverworks_pci_intr(arg) 4862 void *arg; 4863 { 4864 struct pciide_softc *sc = arg; 4865 struct pciide_channel *cp; 4866 struct channel_softc *wdc_cp; 4867 int rv = 0; 4868 int dmastat, i, crv; 4869 4870 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4871 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4872 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 4873 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 4874 IDEDMA_CTL_INTR) 4875 continue; 4876 cp = &sc->pciide_channels[i]; 4877 wdc_cp = &cp->wdc_channel; 4878 crv = wdcintr(wdc_cp); 4879 if (crv == 0) { 4880 printf("%s:%d: bogus intr\n", 4881 sc->sc_wdcdev.sc_dev.dv_xname, i); 4882 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4883 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 4884 } else 4885 rv = 1; 4886 } 4887 return rv; 4888 } 4889