1 /* $NetBSD: pciide.c,v 1.100 2000/12/29 18:59:01 tsutsui Exp $ */ 2 3 4 /* 5 * Copyright (c) 1999 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by the University of 18 * California, Berkeley and its contributors. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 */ 35 36 37 /* 38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 3. All advertising materials mentioning features or use of this software 49 * must display the following acknowledgement: 50 * This product includes software developed by Christopher G. Demetriou 51 * for the NetBSD Project. 52 * 4. The name of the author may not be used to endorse or promote products 53 * derived from this software without specific prior written permission 54 * 55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 */ 66 67 /* 68 * PCI IDE controller driver. 69 * 70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 71 * sys/dev/pci/ppb.c, revision 1.16). 72 * 73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 75 * 5/16/94" from the PCI SIG. 76 * 77 */ 78 79 #ifndef WDCDEBUG 80 #define WDCDEBUG 81 #endif 82 83 #define DEBUG_DMA 0x01 84 #define DEBUG_XFERS 0x02 85 #define DEBUG_FUNCS 0x08 86 #define DEBUG_PROBE 0x10 87 #ifdef WDCDEBUG 88 int wdcdebug_pciide_mask = 0; 89 #define WDCDEBUG_PRINT(args, level) \ 90 if (wdcdebug_pciide_mask & (level)) printf args 91 #else 92 #define WDCDEBUG_PRINT(args, level) 93 #endif 94 #include <sys/param.h> 95 #include <sys/systm.h> 96 #include <sys/device.h> 97 #include <sys/malloc.h> 98 99 #include <uvm/uvm_extern.h> 100 101 #include <machine/endian.h> 102 103 #include <dev/pci/pcireg.h> 104 #include <dev/pci/pcivar.h> 105 #include <dev/pci/pcidevs.h> 106 #include <dev/pci/pciidereg.h> 107 #include <dev/pci/pciidevar.h> 108 #include <dev/pci/pciide_piix_reg.h> 109 #include <dev/pci/pciide_amd_reg.h> 110 #include <dev/pci/pciide_apollo_reg.h> 111 #include <dev/pci/pciide_cmd_reg.h> 112 #include <dev/pci/pciide_cy693_reg.h> 113 #include <dev/pci/pciide_sis_reg.h> 114 #include <dev/pci/pciide_acer_reg.h> 115 #include <dev/pci/pciide_pdc202xx_reg.h> 116 #include <dev/pci/pciide_opti_reg.h> 117 #include <dev/pci/pciide_hpt_reg.h> 118 #include <dev/pci/cy82c693var.h> 119 120 #include "opt_pciide.h" 121 122 /* inlines for reading/writing 8-bit PCI registers */ 123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t, 124 int)); 125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t, 126 int, u_int8_t)); 127 128 static __inline u_int8_t 129 pciide_pci_read(pc, pa, reg) 130 pci_chipset_tag_t pc; 131 pcitag_t pa; 132 int reg; 133 { 134 135 return (pci_conf_read(pc, pa, (reg & ~0x03)) >> 136 ((reg & 0x03) * 8) & 0xff); 137 } 138 139 static __inline void 140 pciide_pci_write(pc, pa, reg, val) 141 pci_chipset_tag_t pc; 142 pcitag_t pa; 143 int reg; 144 u_int8_t val; 145 { 146 pcireg_t pcival; 147 148 pcival = pci_conf_read(pc, pa, (reg & ~0x03)); 149 pcival &= ~(0xff << ((reg & 0x03) * 8)); 150 pcival |= (val << ((reg & 0x03) * 8)); 151 pci_conf_write(pc, pa, (reg & ~0x03), pcival); 152 } 153 154 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 155 156 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 157 void piix_setup_channel __P((struct channel_softc*)); 158 void piix3_4_setup_channel __P((struct channel_softc*)); 159 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t)); 160 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*)); 161 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t)); 162 163 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 164 void 
amd756_setup_channel __P((struct channel_softc*)); 165 166 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 167 void apollo_setup_channel __P((struct channel_softc*)); 168 169 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 170 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 171 void cmd0643_9_setup_channel __P((struct channel_softc*)); 172 void cmd_channel_map __P((struct pci_attach_args *, 173 struct pciide_softc *, int)); 174 int cmd_pci_intr __P((void *)); 175 void cmd646_9_irqack __P((struct channel_softc *)); 176 177 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 178 void cy693_setup_channel __P((struct channel_softc*)); 179 180 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 181 void sis_setup_channel __P((struct channel_softc*)); 182 183 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 184 void acer_setup_channel __P((struct channel_softc*)); 185 int acer_pci_intr __P((void *)); 186 187 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 188 void pdc202xx_setup_channel __P((struct channel_softc*)); 189 int pdc202xx_pci_intr __P((void *)); 190 191 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 192 void opti_setup_channel __P((struct channel_softc*)); 193 194 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 195 void hpt_setup_channel __P((struct channel_softc*)); 196 int hpt_pci_intr __P((void *)); 197 198 void pciide_channel_dma_setup __P((struct pciide_channel *)); 199 int pciide_dma_table_setup __P((struct pciide_softc*, int, int)); 200 int pciide_dma_init __P((void*, int, int, void *, size_t, int)); 201 void pciide_dma_start __P((void*, int, int)); 202 int pciide_dma_finish __P((void*, int, int, int)); 203 void pciide_irqack __P((struct channel_softc *)); 204 void pciide_print_modes __P((struct pciide_channel *)); 205 206 struct pciide_product_desc { 207 u_int32_t ide_product; 208 int ide_flags; 209 const char *ide_name; 210 /* map and setup chip, probe drives */ 211 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*)); 212 }; 213 214 /* Flags for ide_flags */ 215 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 216 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 217 218 /* Default product description for devices not known from this controller */ 219 const struct pciide_product_desc default_product_desc = { 220 0, 221 0, 222 "Generic PCI IDE controller", 223 default_chip_map, 224 }; 225 226 const struct pciide_product_desc pciide_intel_products[] = { 227 { PCI_PRODUCT_INTEL_82092AA, 228 0, 229 "Intel 82092AA IDE controller", 230 default_chip_map, 231 }, 232 { PCI_PRODUCT_INTEL_82371FB_IDE, 233 0, 234 "Intel 82371FB IDE controller (PIIX)", 235 piix_chip_map, 236 }, 237 { PCI_PRODUCT_INTEL_82371SB_IDE, 238 0, 239 "Intel 82371SB IDE Interface (PIIX3)", 240 piix_chip_map, 241 }, 242 { PCI_PRODUCT_INTEL_82371AB_IDE, 243 0, 244 "Intel 82371AB IDE controller (PIIX4)", 245 piix_chip_map, 246 }, 247 { PCI_PRODUCT_INTEL_82440MX_IDE, 248 0, 249 "Intel 82440MX IDE controller", 250 piix_chip_map 251 }, 252 { PCI_PRODUCT_INTEL_82801AA_IDE, 253 0, 254 "Intel 82801AA IDE Controller (ICH)", 255 piix_chip_map, 256 }, 257 { PCI_PRODUCT_INTEL_82801AB_IDE, 258 0, 259 "Intel 82801AB IDE Controller (ICH0)", 260 piix_chip_map, 261 }, 262 { PCI_PRODUCT_INTEL_82801BA_IDE, 263 0, 264 "Intel 82801BA IDE Controller 
(ICH2)", 265 piix_chip_map, 266 }, 267 { 0, 268 0, 269 NULL, 270 } 271 }; 272 273 const struct pciide_product_desc pciide_amd_products[] = { 274 { PCI_PRODUCT_AMD_PBC756_IDE, 275 0, 276 "Advanced Micro Devices AMD756 IDE Controller", 277 amd756_chip_map 278 }, 279 { 0, 280 0, 281 NULL, 282 } 283 }; 284 285 const struct pciide_product_desc pciide_cmd_products[] = { 286 { PCI_PRODUCT_CMDTECH_640, 287 0, 288 "CMD Technology PCI0640", 289 cmd_chip_map 290 }, 291 { PCI_PRODUCT_CMDTECH_643, 292 0, 293 "CMD Technology PCI0643", 294 cmd0643_9_chip_map, 295 }, 296 { PCI_PRODUCT_CMDTECH_646, 297 0, 298 "CMD Technology PCI0646", 299 cmd0643_9_chip_map, 300 }, 301 { PCI_PRODUCT_CMDTECH_648, 302 IDE_PCI_CLASS_OVERRIDE, 303 "CMD Technology PCI0648", 304 cmd0643_9_chip_map, 305 }, 306 { PCI_PRODUCT_CMDTECH_649, 307 IDE_PCI_CLASS_OVERRIDE, 308 "CMD Technology PCI0649", 309 cmd0643_9_chip_map, 310 }, 311 { 0, 312 0, 313 NULL, 314 } 315 }; 316 317 const struct pciide_product_desc pciide_via_products[] = { 318 { PCI_PRODUCT_VIATECH_VT82C586_IDE, 319 0, 320 "VIA Tech VT82C586 IDE Controller", 321 apollo_chip_map, 322 }, 323 { PCI_PRODUCT_VIATECH_VT82C586A_IDE, 324 0, 325 "VIA Tech VT82C586A IDE Controller", 326 apollo_chip_map, 327 }, 328 { 0, 329 0, 330 NULL, 331 } 332 }; 333 334 const struct pciide_product_desc pciide_cypress_products[] = { 335 { PCI_PRODUCT_CONTAQ_82C693, 336 IDE_16BIT_IOSPACE, 337 "Cypress 82C693 IDE Controller", 338 cy693_chip_map, 339 }, 340 { 0, 341 0, 342 NULL, 343 } 344 }; 345 346 const struct pciide_product_desc pciide_sis_products[] = { 347 { PCI_PRODUCT_SIS_5597_IDE, 348 0, 349 "Silicon Integrated System 5597/5598 IDE controller", 350 sis_chip_map, 351 }, 352 { 0, 353 0, 354 NULL, 355 } 356 }; 357 358 const struct pciide_product_desc pciide_acer_products[] = { 359 { PCI_PRODUCT_ALI_M5229, 360 0, 361 "Acer Labs M5229 UDMA IDE Controller", 362 acer_chip_map, 363 }, 364 { 0, 365 0, 366 NULL, 367 } 368 }; 369 370 const struct pciide_product_desc pciide_promise_products[] = { 371 { PCI_PRODUCT_PROMISE_ULTRA33, 372 IDE_PCI_CLASS_OVERRIDE, 373 "Promise Ultra33/ATA Bus Master IDE Accelerator", 374 pdc202xx_chip_map, 375 }, 376 { PCI_PRODUCT_PROMISE_ULTRA66, 377 IDE_PCI_CLASS_OVERRIDE, 378 "Promise Ultra66/ATA Bus Master IDE Accelerator", 379 pdc202xx_chip_map, 380 }, 381 { PCI_PRODUCT_PROMISE_ULTRA100, 382 IDE_PCI_CLASS_OVERRIDE, 383 "Promise Ultra100/ATA Bus Master IDE Accelerator", 384 pdc202xx_chip_map, 385 }, 386 { PCI_PRODUCT_PROMISE_ULTRA100X, 387 IDE_PCI_CLASS_OVERRIDE, 388 "Promise Ultra100/ATA Bus Master IDE Accelerator", 389 pdc202xx_chip_map, 390 }, 391 { 0, 392 0, 393 NULL, 394 } 395 }; 396 397 const struct pciide_product_desc pciide_opti_products[] = { 398 { PCI_PRODUCT_OPTI_82C621, 399 0, 400 "OPTi 82c621 PCI IDE controller", 401 opti_chip_map, 402 }, 403 { PCI_PRODUCT_OPTI_82C568, 404 0, 405 "OPTi 82c568 (82c621 compatible) PCI IDE controller", 406 opti_chip_map, 407 }, 408 { PCI_PRODUCT_OPTI_82D568, 409 0, 410 "OPTi 82d568 (82c621 compatible) PCI IDE controller", 411 opti_chip_map, 412 }, 413 { 0, 414 0, 415 NULL, 416 } 417 }; 418 419 const struct pciide_product_desc pciide_triones_products[] = { 420 { PCI_PRODUCT_TRIONES_HPT366, 421 IDE_PCI_CLASS_OVERRIDE, 422 "Triones/Highpoint HPT366/370 IDE Controller", 423 hpt_chip_map, 424 }, 425 { 0, 426 0, 427 NULL, 428 } 429 }; 430 431 struct pciide_vendor_desc { 432 u_int32_t ide_vendor; 433 const struct pciide_product_desc *ide_products; 434 }; 435 436 const struct pciide_vendor_desc pciide_vendors[] = { 437 { PCI_VENDOR_INTEL, 
pciide_intel_products }, 438 { PCI_VENDOR_CMDTECH, pciide_cmd_products }, 439 { PCI_VENDOR_VIATECH, pciide_via_products }, 440 { PCI_VENDOR_CONTAQ, pciide_cypress_products }, 441 { PCI_VENDOR_SIS, pciide_sis_products }, 442 { PCI_VENDOR_ALI, pciide_acer_products }, 443 { PCI_VENDOR_PROMISE, pciide_promise_products }, 444 { PCI_VENDOR_AMD, pciide_amd_products }, 445 { PCI_VENDOR_OPTI, pciide_opti_products }, 446 { PCI_VENDOR_TRIONES, pciide_triones_products }, 447 { 0, NULL } 448 }; 449 450 /* options passed via the 'flags' config keyword */ 451 #define PCIIDE_OPTIONS_DMA 0x01 452 453 int pciide_match __P((struct device *, struct cfdata *, void *)); 454 void pciide_attach __P((struct device *, struct device *, void *)); 455 456 struct cfattach pciide_ca = { 457 sizeof(struct pciide_softc), pciide_match, pciide_attach 458 }; 459 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *)); 460 int pciide_mapregs_compat __P(( struct pci_attach_args *, 461 struct pciide_channel *, int, bus_size_t *, bus_size_t*)); 462 int pciide_mapregs_native __P((struct pci_attach_args *, 463 struct pciide_channel *, bus_size_t *, bus_size_t *, 464 int (*pci_intr) __P((void *)))); 465 void pciide_mapreg_dma __P((struct pciide_softc *, 466 struct pci_attach_args *)); 467 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t)); 468 void pciide_mapchan __P((struct pci_attach_args *, 469 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *, 470 int (*pci_intr) __P((void *)))); 471 int pciide_chan_candisable __P((struct pciide_channel *)); 472 void pciide_map_compat_intr __P(( struct pci_attach_args *, 473 struct pciide_channel *, int, int)); 474 int pciide_print __P((void *, const char *pnp)); 475 int pciide_compat_intr __P((void *)); 476 int pciide_pci_intr __P((void *)); 477 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t)); 478 479 const struct pciide_product_desc * 480 pciide_lookup_product(id) 481 u_int32_t id; 482 { 483 const struct pciide_product_desc *pp; 484 const struct pciide_vendor_desc *vp; 485 486 for (vp = pciide_vendors; vp->ide_products != NULL; vp++) 487 if (PCI_VENDOR(id) == vp->ide_vendor) 488 break; 489 490 if ((pp = vp->ide_products) == NULL) 491 return NULL; 492 493 for (; pp->ide_name != NULL; pp++) 494 if (PCI_PRODUCT(id) == pp->ide_product) 495 break; 496 497 if (pp->ide_name == NULL) 498 return NULL; 499 return pp; 500 } 501 502 int 503 pciide_match(parent, match, aux) 504 struct device *parent; 505 struct cfdata *match; 506 void *aux; 507 { 508 struct pci_attach_args *pa = aux; 509 const struct pciide_product_desc *pp; 510 511 /* 512 * Check the ID register to see that it's a PCI IDE controller. 513 * If it is, we assume that we can deal with it; it _should_ 514 * work in a standardized way... 515 */ 516 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE && 517 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 518 return (1); 519 } 520 521 /* 522 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE 523 * controllers. Let see if we can deal with it anyway. 
524 */ 525 pp = pciide_lookup_product(pa->pa_id); 526 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) { 527 return (1); 528 } 529 530 return (0); 531 } 532 533 void 534 pciide_attach(parent, self, aux) 535 struct device *parent, *self; 536 void *aux; 537 { 538 struct pci_attach_args *pa = aux; 539 pci_chipset_tag_t pc = pa->pa_pc; 540 pcitag_t tag = pa->pa_tag; 541 struct pciide_softc *sc = (struct pciide_softc *)self; 542 pcireg_t csr; 543 char devinfo[256]; 544 const char *displaydev; 545 546 sc->sc_pp = pciide_lookup_product(pa->pa_id); 547 if (sc->sc_pp == NULL) { 548 sc->sc_pp = &default_product_desc; 549 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo); 550 displaydev = devinfo; 551 } else 552 displaydev = sc->sc_pp->ide_name; 553 554 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class)); 555 556 sc->sc_pc = pa->pa_pc; 557 sc->sc_tag = pa->pa_tag; 558 #ifdef WDCDEBUG 559 if (wdcdebug_pciide_mask & DEBUG_PROBE) 560 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL); 561 #endif 562 sc->sc_pp->chip_map(sc, pa); 563 564 if (sc->sc_dma_ok) { 565 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 566 csr |= PCI_COMMAND_MASTER_ENABLE; 567 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 568 } 569 WDCDEBUG_PRINT(("pciide: command/status register=%x\n", 570 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE); 571 } 572 573 /* tell wether the chip is enabled or not */ 574 int 575 pciide_chipen(sc, pa) 576 struct pciide_softc *sc; 577 struct pci_attach_args *pa; 578 { 579 pcireg_t csr; 580 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) { 581 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 582 PCI_COMMAND_STATUS_REG); 583 printf("%s: device disabled (at %s)\n", 584 sc->sc_wdcdev.sc_dev.dv_xname, 585 (csr & PCI_COMMAND_IO_ENABLE) == 0 ? 
586 "device" : "bridge"); 587 return 0; 588 } 589 return 1; 590 } 591 592 int 593 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep) 594 struct pci_attach_args *pa; 595 struct pciide_channel *cp; 596 int compatchan; 597 bus_size_t *cmdsizep, *ctlsizep; 598 { 599 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 600 struct channel_softc *wdc_cp = &cp->wdc_channel; 601 602 cp->compat = 1; 603 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 604 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 605 606 wdc_cp->cmd_iot = pa->pa_iot; 607 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 608 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) { 609 printf("%s: couldn't map %s channel cmd regs\n", 610 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 611 return (0); 612 } 613 614 wdc_cp->ctl_iot = pa->pa_iot; 615 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 616 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) { 617 printf("%s: couldn't map %s channel ctl regs\n", 618 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 619 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, 620 PCIIDE_COMPAT_CMD_SIZE); 621 return (0); 622 } 623 624 return (1); 625 } 626 627 int 628 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr) 629 struct pci_attach_args * pa; 630 struct pciide_channel *cp; 631 bus_size_t *cmdsizep, *ctlsizep; 632 int (*pci_intr) __P((void *)); 633 { 634 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 635 struct channel_softc *wdc_cp = &cp->wdc_channel; 636 const char *intrstr; 637 pci_intr_handle_t intrhandle; 638 639 cp->compat = 0; 640 641 if (sc->sc_pci_ih == NULL) { 642 if (pci_intr_map(pa, &intrhandle) != 0) { 643 printf("%s: couldn't map native-PCI interrupt\n", 644 sc->sc_wdcdev.sc_dev.dv_xname); 645 return 0; 646 } 647 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 648 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 649 intrhandle, IPL_BIO, pci_intr, sc); 650 if (sc->sc_pci_ih != NULL) { 651 printf("%s: using %s for native-PCI interrupt\n", 652 sc->sc_wdcdev.sc_dev.dv_xname, 653 intrstr ? intrstr : "unknown interrupt"); 654 } else { 655 printf("%s: couldn't establish native-PCI interrupt", 656 sc->sc_wdcdev.sc_dev.dv_xname); 657 if (intrstr != NULL) 658 printf(" at %s", intrstr); 659 printf("\n"); 660 return 0; 661 } 662 } 663 cp->ih = sc->sc_pci_ih; 664 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel), 665 PCI_MAPREG_TYPE_IO, 0, 666 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) { 667 printf("%s: couldn't map %s channel cmd regs\n", 668 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 669 return 0; 670 } 671 672 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel), 673 PCI_MAPREG_TYPE_IO, 0, 674 &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) { 675 printf("%s: couldn't map %s channel ctl regs\n", 676 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 677 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 678 return 0; 679 } 680 return (1); 681 } 682 683 void 684 pciide_mapreg_dma(sc, pa) 685 struct pciide_softc *sc; 686 struct pci_attach_args *pa; 687 { 688 pcireg_t maptype; 689 bus_addr_t addr; 690 691 /* 692 * Map DMA registers 693 * 694 * Note that sc_dma_ok is the right variable to test to see if 695 * DMA can be done. If the interface doesn't support DMA, 696 * sc_dma_ok will never be non-zero. If the DMA regs couldn't 697 * be mapped, it'll be zero. I.e., sc_dma_ok will only be 698 * non-zero if the interface supports DMA and the registers 699 * could be mapped. 
700 * 701 * XXX Note that despite the fact that the Bus Master IDE specs 702 * XXX say that "The bus master IDE function uses 16 bytes of IO 703 * XXX space," some controllers (at least the United 704 * XXX Microelectronics UM8886BF) place it in memory space. 705 */ 706 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 707 PCIIDE_REG_BUS_MASTER_DMA); 708 709 switch (maptype) { 710 case PCI_MAPREG_TYPE_IO: 711 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 712 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 713 &addr, NULL, NULL) == 0); 714 if (sc->sc_dma_ok == 0) { 715 printf(", but unused (couldn't query registers)"); 716 break; 717 } 718 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 719 && addr >= 0x10000) { 720 sc->sc_dma_ok = 0; 721 printf(", but unused (registers at unsafe address %#lx)", (unsigned long)addr); 722 break; 723 } 724 /* FALLTHROUGH */ 725 726 case PCI_MAPREG_MEM_TYPE_32BIT: 727 sc->sc_dma_ok = (pci_mapreg_map(pa, 728 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 729 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0); 730 sc->sc_dmat = pa->pa_dmat; 731 if (sc->sc_dma_ok == 0) { 732 printf(", but unused (couldn't map registers)"); 733 } else { 734 sc->sc_wdcdev.dma_arg = sc; 735 sc->sc_wdcdev.dma_init = pciide_dma_init; 736 sc->sc_wdcdev.dma_start = pciide_dma_start; 737 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 738 } 739 break; 740 741 default: 742 sc->sc_dma_ok = 0; 743 printf(", but unsupported register maptype (0x%x)", maptype); 744 } 745 } 746 747 int 748 pciide_compat_intr(arg) 749 void *arg; 750 { 751 struct pciide_channel *cp = arg; 752 753 #ifdef DIAGNOSTIC 754 /* should only be called for a compat channel */ 755 if (cp->compat == 0) 756 panic("pciide compat intr called for non-compat chan %p\n", cp); 757 #endif 758 return (wdcintr(&cp->wdc_channel)); 759 } 760 761 int 762 pciide_pci_intr(arg) 763 void *arg; 764 { 765 struct pciide_softc *sc = arg; 766 struct pciide_channel *cp; 767 struct channel_softc *wdc_cp; 768 int i, rv, crv; 769 770 rv = 0; 771 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 772 cp = &sc->pciide_channels[i]; 773 wdc_cp = &cp->wdc_channel; 774 775 /* If a compat channel skip. 
*/ 776 if (cp->compat) 777 continue; 778 /* if this channel not waiting for intr, skip */ 779 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) 780 continue; 781 782 crv = wdcintr(wdc_cp); 783 if (crv == 0) 784 ; /* leave rv alone */ 785 else if (crv == 1) 786 rv = 1; /* claim the intr */ 787 else if (rv == 0) /* crv should be -1 in this case */ 788 rv = crv; /* if we've done no better, take it */ 789 } 790 return (rv); 791 } 792 793 void 794 pciide_channel_dma_setup(cp) 795 struct pciide_channel *cp; 796 { 797 int drive; 798 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 799 struct ata_drive_datas *drvp; 800 801 for (drive = 0; drive < 2; drive++) { 802 drvp = &cp->wdc_channel.ch_drive[drive]; 803 /* If no drive, skip */ 804 if ((drvp->drive_flags & DRIVE) == 0) 805 continue; 806 /* setup DMA if needed */ 807 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 808 (drvp->drive_flags & DRIVE_UDMA) == 0) || 809 sc->sc_dma_ok == 0) { 810 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 811 continue; 812 } 813 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive) 814 != 0) { 815 /* Abort DMA setup */ 816 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 817 continue; 818 } 819 } 820 } 821 822 int 823 pciide_dma_table_setup(sc, channel, drive) 824 struct pciide_softc *sc; 825 int channel, drive; 826 { 827 bus_dma_segment_t seg; 828 int error, rseg; 829 const bus_size_t dma_table_size = 830 sizeof(struct idedma_table) * NIDEDMA_TABLES; 831 struct pciide_dma_maps *dma_maps = 832 &sc->pciide_channels[channel].dma_maps[drive]; 833 834 /* If table was already allocated, just return */ 835 if (dma_maps->dma_table) 836 return 0; 837 838 /* Allocate memory for the DMA tables and map it */ 839 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size, 840 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg, 841 BUS_DMA_NOWAIT)) != 0) { 842 printf("%s:%d: unable to allocate table DMA for " 843 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 844 channel, drive, error); 845 return error; 846 } 847 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 848 dma_table_size, 849 (caddr_t *)&dma_maps->dma_table, 850 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 851 printf("%s:%d: unable to map table DMA for" 852 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 853 channel, drive, error); 854 return error; 855 } 856 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, " 857 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size, 858 (unsigned long)seg.ds_addr), DEBUG_PROBE); 859 860 /* Create and load table DMA map for this disk */ 861 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size, 862 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 863 &dma_maps->dmamap_table)) != 0) { 864 printf("%s:%d: unable to create table DMA map for " 865 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 866 channel, drive, error); 867 return error; 868 } 869 if ((error = bus_dmamap_load(sc->sc_dmat, 870 dma_maps->dmamap_table, 871 dma_maps->dma_table, 872 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 873 printf("%s:%d: unable to load table DMA map for " 874 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 875 channel, drive, error); 876 return error; 877 } 878 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 879 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr), 880 DEBUG_PROBE); 881 /* Create a xfer DMA map for this drive */ 882 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 883 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, 
IDEDMA_BYTE_COUNT_ALIGN, 884 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 885 &dma_maps->dmamap_xfer)) != 0) { 886 printf("%s:%d: unable to create xfer DMA map for " 887 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 888 channel, drive, error); 889 return error; 890 } 891 return 0; 892 } 893 894 int 895 pciide_dma_init(v, channel, drive, databuf, datalen, flags) 896 void *v; 897 int channel, drive; 898 void *databuf; 899 size_t datalen; 900 int flags; 901 { 902 struct pciide_softc *sc = v; 903 int error, seg; 904 struct pciide_dma_maps *dma_maps = 905 &sc->pciide_channels[channel].dma_maps[drive]; 906 907 error = bus_dmamap_load(sc->sc_dmat, 908 dma_maps->dmamap_xfer, 909 databuf, datalen, NULL, BUS_DMA_NOWAIT); 910 if (error) { 911 printf("%s:%d: unable to load xfer DMA map for" 912 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 913 channel, drive, error); 914 return error; 915 } 916 917 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 918 dma_maps->dmamap_xfer->dm_mapsize, 919 (flags & WDC_DMA_READ) ? 920 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 921 922 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 923 #ifdef DIAGNOSTIC 924 /* A segment must not cross a 64k boundary */ 925 { 926 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 927 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 928 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 929 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 930 printf("pciide_dma: segment %d physical addr 0x%lx" 931 " len 0x%lx not properly aligned\n", 932 seg, phys, len); 933 panic("pciide_dma: buf align"); 934 } 935 } 936 #endif 937 dma_maps->dma_table[seg].base_addr = 938 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 939 dma_maps->dma_table[seg].byte_count = 940 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 941 IDEDMA_BYTE_COUNT_MASK); 942 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 943 seg, le32toh(dma_maps->dma_table[seg].byte_count), 944 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 945 946 } 947 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 948 htole32(IDEDMA_BYTE_COUNT_EOT); 949 950 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 951 dma_maps->dmamap_table->dm_mapsize, 952 BUS_DMASYNC_PREWRITE); 953 954 /* Maps are ready. Start DMA function */ 955 #ifdef DIAGNOSTIC 956 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 957 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 958 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr); 959 panic("pciide_dma_init: table align"); 960 } 961 #endif 962 963 /* Clear status bits */ 964 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 965 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, 966 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 967 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel)); 968 /* Write table addr */ 969 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 970 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel, 971 dma_maps->dmamap_table->dm_segs[0].ds_addr); 972 /* set read/write */ 973 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 974 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 975 (flags & WDC_DMA_READ) ? 
IDEDMA_CMD_WRITE: 0); 976 /* remember flags */ 977 dma_maps->dma_flags = flags; 978 return 0; 979 } 980 981 void 982 pciide_dma_start(v, channel, drive) 983 void *v; 984 int channel, drive; 985 { 986 struct pciide_softc *sc = v; 987 988 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS); 989 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 990 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 991 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 992 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START); 993 } 994 995 int 996 pciide_dma_finish(v, channel, drive, force) 997 void *v; 998 int channel, drive; 999 int force; 1000 { 1001 struct pciide_softc *sc = v; 1002 u_int8_t status; 1003 int error = 0; 1004 struct pciide_dma_maps *dma_maps = 1005 &sc->pciide_channels[channel].dma_maps[drive]; 1006 1007 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1008 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel); 1009 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 1010 DEBUG_XFERS); 1011 1012 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) 1013 return WDC_DMAST_NOIRQ; 1014 1015 /* stop DMA channel */ 1016 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1017 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1018 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1019 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START); 1020 1021 /* Unload the map of the data buffer */ 1022 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1023 dma_maps->dmamap_xfer->dm_mapsize, 1024 (dma_maps->dma_flags & WDC_DMA_READ) ? 1025 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1026 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 1027 1028 if ((status & IDEDMA_CTL_ERR) != 0) { 1029 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 1030 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 1031 error |= WDC_DMAST_ERR; 1032 } 1033 1034 if ((status & IDEDMA_CTL_INTR) == 0) { 1035 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 1036 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 1037 drive, status); 1038 error |= WDC_DMAST_NOIRQ; 1039 } 1040 1041 if ((status & IDEDMA_CTL_ACT) != 0) { 1042 /* data underrun, may be a valid condition for ATAPI */ 1043 error |= WDC_DMAST_UNDER; 1044 } 1045 return error; 1046 } 1047 1048 void 1049 pciide_irqack(chp) 1050 struct channel_softc *chp; 1051 { 1052 struct pciide_channel *cp = (struct pciide_channel*)chp; 1053 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1054 1055 /* clear status bits in IDE DMA registers */ 1056 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1057 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel, 1058 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1059 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel)); 1060 } 1061 1062 /* some common code used by several chip_map */ 1063 int 1064 pciide_chansetup(sc, channel, interface) 1065 struct pciide_softc *sc; 1066 int channel; 1067 pcireg_t interface; 1068 { 1069 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1070 sc->wdc_chanarray[channel] = &cp->wdc_channel; 1071 cp->name = PCIIDE_CHANNEL_NAME(channel); 1072 cp->wdc_channel.channel = channel; 1073 cp->wdc_channel.wdc = &sc->sc_wdcdev; 1074 cp->wdc_channel.ch_queue = 1075 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 1076 if (cp->wdc_channel.ch_queue == NULL) { 1077 printf("%s %s channel: " 1078 "can't allocate memory for command queue", 1079 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1080 return 0; 1081 } 1082 printf("%s: %s channel %s to %s mode\n", 1083 
sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1084 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 1085 "configured" : "wired", 1086 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 1087 "native-PCI" : "compatibility"); 1088 return 1; 1089 } 1090 1091 /* some common code used by several chip channel_map */ 1092 void 1093 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr) 1094 struct pci_attach_args *pa; 1095 struct pciide_channel *cp; 1096 pcireg_t interface; 1097 bus_size_t *cmdsizep, *ctlsizep; 1098 int (*pci_intr) __P((void *)); 1099 { 1100 struct channel_softc *wdc_cp = &cp->wdc_channel; 1101 1102 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 1103 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 1104 pci_intr); 1105 else 1106 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1107 wdc_cp->channel, cmdsizep, ctlsizep); 1108 1109 if (cp->hw_ok == 0) 1110 return; 1111 wdc_cp->data32iot = wdc_cp->cmd_iot; 1112 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 1113 wdcattach(wdc_cp); 1114 } 1115 1116 /* 1117 * Generic code to call to know if a channel can be disabled. Return 1 1118 * if channel can be disabled, 0 if not 1119 */ 1120 int 1121 pciide_chan_candisable(cp) 1122 struct pciide_channel *cp; 1123 { 1124 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1125 struct channel_softc *wdc_cp = &cp->wdc_channel; 1126 1127 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 1128 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 1129 printf("%s: disabling %s channel (no drives)\n", 1130 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1131 cp->hw_ok = 0; 1132 return 1; 1133 } 1134 return 0; 1135 } 1136 1137 /* 1138 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 1139 * Set hw_ok=0 on failure 1140 */ 1141 void 1142 pciide_map_compat_intr(pa, cp, compatchan, interface) 1143 struct pci_attach_args *pa; 1144 struct pciide_channel *cp; 1145 int compatchan, interface; 1146 { 1147 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1148 struct channel_softc *wdc_cp = &cp->wdc_channel; 1149 1150 if (cp->hw_ok == 0) 1151 return; 1152 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1153 return; 1154 1155 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 1156 pa, compatchan, pciide_compat_intr, cp); 1157 if (cp->ih == NULL) { 1158 printf("%s: no compatibility interrupt for use by %s " 1159 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1160 cp->hw_ok = 0; 1161 } 1162 } 1163 1164 void 1165 pciide_print_modes(cp) 1166 struct pciide_channel *cp; 1167 { 1168 wdc_print_modes(&cp->wdc_channel); 1169 } 1170 1171 void 1172 default_chip_map(sc, pa) 1173 struct pciide_softc *sc; 1174 struct pci_attach_args *pa; 1175 { 1176 struct pciide_channel *cp; 1177 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1178 pcireg_t csr; 1179 int channel, drive; 1180 struct ata_drive_datas *drvp; 1181 u_int8_t idedma_ctl; 1182 bus_size_t cmdsize, ctlsize; 1183 char *failreason; 1184 1185 if (pciide_chipen(sc, pa) == 0) 1186 return; 1187 1188 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 1189 printf("%s: bus-master DMA support present", 1190 sc->sc_wdcdev.sc_dev.dv_xname); 1191 if (sc->sc_pp == &default_product_desc && 1192 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 1193 PCIIDE_OPTIONS_DMA) == 0) { 1194 printf(", but unused (no driver support)"); 1195 sc->sc_dma_ok = 0; 1196 } else { 1197 pciide_mapreg_dma(sc, pa); 1198 if (sc->sc_dma_ok != 0) 1199 printf(", used without full driver " 1200 "support"); 1201 } 1202 
} else { 1203 printf("%s: hardware does not support DMA", 1204 sc->sc_wdcdev.sc_dev.dv_xname); 1205 sc->sc_dma_ok = 0; 1206 } 1207 printf("\n"); 1208 if (sc->sc_dma_ok) { 1209 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1210 sc->sc_wdcdev.irqack = pciide_irqack; 1211 } 1212 sc->sc_wdcdev.PIO_cap = 0; 1213 sc->sc_wdcdev.DMA_cap = 0; 1214 1215 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1216 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1217 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 1218 1219 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1220 cp = &sc->pciide_channels[channel]; 1221 if (pciide_chansetup(sc, channel, interface) == 0) 1222 continue; 1223 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 1224 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 1225 &ctlsize, pciide_pci_intr); 1226 } else { 1227 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1228 channel, &cmdsize, &ctlsize); 1229 } 1230 if (cp->hw_ok == 0) 1231 continue; 1232 /* 1233 * Check to see if something appears to be there. 1234 */ 1235 failreason = NULL; 1236 if (!wdcprobe(&cp->wdc_channel)) { 1237 failreason = "not responding; disabled or no drives?"; 1238 goto next; 1239 } 1240 /* 1241 * Now, make sure it's actually attributable to this PCI IDE 1242 * channel by trying to access the channel again while the 1243 * PCI IDE controller's I/O space is disabled. (If the 1244 * channel no longer appears to be there, it belongs to 1245 * this controller.) YUCK! 1246 */ 1247 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 1248 PCI_COMMAND_STATUS_REG); 1249 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1250 csr & ~PCI_COMMAND_IO_ENABLE); 1251 if (wdcprobe(&cp->wdc_channel)) 1252 failreason = "other hardware responding at addresses"; 1253 pci_conf_write(sc->sc_pc, sc->sc_tag, 1254 PCI_COMMAND_STATUS_REG, csr); 1255 next: 1256 if (failreason) { 1257 printf("%s: %s channel ignored (%s)\n", 1258 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1259 failreason); 1260 cp->hw_ok = 0; 1261 bus_space_unmap(cp->wdc_channel.cmd_iot, 1262 cp->wdc_channel.cmd_ioh, cmdsize); 1263 bus_space_unmap(cp->wdc_channel.ctl_iot, 1264 cp->wdc_channel.ctl_ioh, ctlsize); 1265 } else { 1266 pciide_map_compat_intr(pa, cp, channel, interface); 1267 } 1268 if (cp->hw_ok) { 1269 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 1270 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 1271 wdcattach(&cp->wdc_channel); 1272 } 1273 } 1274 1275 if (sc->sc_dma_ok == 0) 1276 return; 1277 1278 /* Allocate DMA maps */ 1279 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1280 idedma_ctl = 0; 1281 cp = &sc->pciide_channels[channel]; 1282 for (drive = 0; drive < 2; drive++) { 1283 drvp = &cp->wdc_channel.ch_drive[drive]; 1284 /* If no drive, skip */ 1285 if ((drvp->drive_flags & DRIVE) == 0) 1286 continue; 1287 if ((drvp->drive_flags & DRIVE_DMA) == 0) 1288 continue; 1289 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 1290 /* Abort DMA setup */ 1291 printf("%s:%d:%d: can't allocate DMA maps, " 1292 "using PIO transfers\n", 1293 sc->sc_wdcdev.sc_dev.dv_xname, 1294 channel, drive); 1295 drvp->drive_flags &= ~DRIVE_DMA; 1296 } 1297 printf("%s:%d:%d: using DMA data transfers\n", 1298 sc->sc_wdcdev.sc_dev.dv_xname, 1299 channel, drive); 1300 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1301 } 1302 if (idedma_ctl != 0) { 1303 /* Add software bits in status register */ 1304 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1305 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel), 1306 idedma_ctl); 1307 } 1308 } 1309 } 
1310 1311 void 1312 piix_chip_map(sc, pa) 1313 struct pciide_softc *sc; 1314 struct pci_attach_args *pa; 1315 { 1316 struct pciide_channel *cp; 1317 int channel; 1318 u_int32_t idetim; 1319 bus_size_t cmdsize, ctlsize; 1320 1321 if (pciide_chipen(sc, pa) == 0) 1322 return; 1323 1324 printf("%s: bus-master DMA support present", 1325 sc->sc_wdcdev.sc_dev.dv_xname); 1326 pciide_mapreg_dma(sc, pa); 1327 printf("\n"); 1328 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1329 WDC_CAPABILITY_MODE; 1330 if (sc->sc_dma_ok) { 1331 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1332 sc->sc_wdcdev.irqack = pciide_irqack; 1333 switch(sc->sc_pp->ide_product) { 1334 case PCI_PRODUCT_INTEL_82371AB_IDE: 1335 case PCI_PRODUCT_INTEL_82440MX_IDE: 1336 case PCI_PRODUCT_INTEL_82801AA_IDE: 1337 case PCI_PRODUCT_INTEL_82801AB_IDE: 1338 case PCI_PRODUCT_INTEL_82801BA_IDE: 1339 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 1340 } 1341 } 1342 sc->sc_wdcdev.PIO_cap = 4; 1343 sc->sc_wdcdev.DMA_cap = 2; 1344 switch(sc->sc_pp->ide_product) { 1345 case PCI_PRODUCT_INTEL_82801AA_IDE: 1346 case PCI_PRODUCT_INTEL_82801BA_IDE: 1347 sc->sc_wdcdev.UDMA_cap = 4; 1348 break; 1349 default: 1350 sc->sc_wdcdev.UDMA_cap = 2; 1351 } 1352 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE) 1353 sc->sc_wdcdev.set_modes = piix_setup_channel; 1354 else 1355 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 1356 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1357 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1358 1359 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x", 1360 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1361 DEBUG_PROBE); 1362 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1363 WDCDEBUG_PRINT((", sidetim=0x%x", 1364 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1365 DEBUG_PROBE); 1366 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1367 WDCDEBUG_PRINT((", udamreg 0x%x", 1368 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1369 DEBUG_PROBE); 1370 } 1371 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1372 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) { 1373 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1374 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1375 DEBUG_PROBE); 1376 } 1377 1378 } 1379 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1380 1381 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1382 cp = &sc->pciide_channels[channel]; 1383 /* PIIX is compat-only */ 1384 if (pciide_chansetup(sc, channel, 0) == 0) 1385 continue; 1386 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1387 if ((PIIX_IDETIM_READ(idetim, channel) & 1388 PIIX_IDETIM_IDE) == 0) { 1389 printf("%s: %s channel ignored (disabled)\n", 1390 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1391 continue; 1392 } 1393 /* PIIX are compat-only pciide devices */ 1394 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr); 1395 if (cp->hw_ok == 0) 1396 continue; 1397 if (pciide_chan_candisable(cp)) { 1398 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 1399 channel); 1400 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 1401 idetim); 1402 } 1403 pciide_map_compat_intr(pa, cp, channel, 0); 1404 if (cp->hw_ok == 0) 1405 continue; 1406 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 1407 } 1408 1409 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 1410 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1411 DEBUG_PROBE); 1412 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1413 WDCDEBUG_PRINT((", sidetim=0x%x", 1414 
pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1415 DEBUG_PROBE); 1416 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1417 WDCDEBUG_PRINT((", udamreg 0x%x", 1418 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1419 DEBUG_PROBE); 1420 } 1421 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1422 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) { 1423 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1424 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1425 DEBUG_PROBE); 1426 } 1427 } 1428 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1429 } 1430 1431 void 1432 piix_setup_channel(chp) 1433 struct channel_softc *chp; 1434 { 1435 u_int8_t mode[2], drive; 1436 u_int32_t oidetim, idetim, idedma_ctl; 1437 struct pciide_channel *cp = (struct pciide_channel*)chp; 1438 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1439 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 1440 1441 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1442 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 1443 idedma_ctl = 0; 1444 1445 /* set up new idetim: Enable IDE registers decode */ 1446 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 1447 chp->channel); 1448 1449 /* setup DMA */ 1450 pciide_channel_dma_setup(cp); 1451 1452 /* 1453 * Here we have to mess up with drives mode: PIIX can't have 1454 * different timings for master and slave drives. 1455 * We need to find the best combination. 1456 */ 1457 1458 /* If both drives supports DMA, take the lower mode */ 1459 if ((drvp[0].drive_flags & DRIVE_DMA) && 1460 (drvp[1].drive_flags & DRIVE_DMA)) { 1461 mode[0] = mode[1] = 1462 min(drvp[0].DMA_mode, drvp[1].DMA_mode); 1463 drvp[0].DMA_mode = mode[0]; 1464 drvp[1].DMA_mode = mode[1]; 1465 goto ok; 1466 } 1467 /* 1468 * If only one drive supports DMA, use its mode, and 1469 * put the other one in PIO mode 0 if mode not compatible 1470 */ 1471 if (drvp[0].drive_flags & DRIVE_DMA) { 1472 mode[0] = drvp[0].DMA_mode; 1473 mode[1] = drvp[1].PIO_mode; 1474 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] || 1475 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]]) 1476 mode[1] = drvp[1].PIO_mode = 0; 1477 goto ok; 1478 } 1479 if (drvp[1].drive_flags & DRIVE_DMA) { 1480 mode[1] = drvp[1].DMA_mode; 1481 mode[0] = drvp[0].PIO_mode; 1482 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] || 1483 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]]) 1484 mode[0] = drvp[0].PIO_mode = 0; 1485 goto ok; 1486 } 1487 /* 1488 * If both drives are not DMA, takes the lower mode, unless 1489 * one of them is PIO mode < 2 1490 */ 1491 if (drvp[0].PIO_mode < 2) { 1492 mode[0] = drvp[0].PIO_mode = 0; 1493 mode[1] = drvp[1].PIO_mode; 1494 } else if (drvp[1].PIO_mode < 2) { 1495 mode[1] = drvp[1].PIO_mode = 0; 1496 mode[0] = drvp[0].PIO_mode; 1497 } else { 1498 mode[0] = mode[1] = 1499 min(drvp[1].PIO_mode, drvp[0].PIO_mode); 1500 drvp[0].PIO_mode = mode[0]; 1501 drvp[1].PIO_mode = mode[1]; 1502 } 1503 ok: /* The modes are setup */ 1504 for (drive = 0; drive < 2; drive++) { 1505 if (drvp[drive].drive_flags & DRIVE_DMA) { 1506 idetim |= piix_setup_idetim_timings( 1507 mode[drive], 1, chp->channel); 1508 goto end; 1509 } 1510 } 1511 /* If we are there, none of the drives are DMA */ 1512 if (mode[0] >= 2) 1513 idetim |= piix_setup_idetim_timings( 1514 mode[0], 0, chp->channel); 1515 else 1516 idetim |= piix_setup_idetim_timings( 1517 mode[1], 0, chp->channel); 1518 end: /* 1519 * timing mode is now set up in the controller. 
Enable 1520 * it per-drive 1521 */ 1522 for (drive = 0; drive < 2; drive++) { 1523 /* If no drive, skip */ 1524 if ((drvp[drive].drive_flags & DRIVE) == 0) 1525 continue; 1526 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 1527 if (drvp[drive].drive_flags & DRIVE_DMA) 1528 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1529 } 1530 if (idedma_ctl != 0) { 1531 /* Add software bits in status register */ 1532 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1533 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 1534 idedma_ctl); 1535 } 1536 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 1537 pciide_print_modes(cp); 1538 } 1539 1540 void 1541 piix3_4_setup_channel(chp) 1542 struct channel_softc *chp; 1543 { 1544 struct ata_drive_datas *drvp; 1545 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 1546 struct pciide_channel *cp = (struct pciide_channel*)chp; 1547 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1548 int drive; 1549 int channel = chp->channel; 1550 1551 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1552 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 1553 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 1554 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 1555 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 1556 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 1557 PIIX_SIDETIM_RTC_MASK(channel)); 1558 1559 idedma_ctl = 0; 1560 /* If channel disabled, no need to go further */ 1561 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 1562 return; 1563 /* set up new idetim: Enable IDE registers decode */ 1564 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 1565 1566 /* setup DMA if needed */ 1567 pciide_channel_dma_setup(cp); 1568 1569 for (drive = 0; drive < 2; drive++) { 1570 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 1571 PIIX_UDMATIM_SET(0x3, channel, drive)); 1572 drvp = &chp->ch_drive[drive]; 1573 /* If no drive, skip */ 1574 if ((drvp->drive_flags & DRIVE) == 0) 1575 continue; 1576 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1577 (drvp->drive_flags & DRIVE_UDMA) == 0)) 1578 goto pio; 1579 1580 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1581 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) { 1582 ideconf |= PIIX_CONFIG_PINGPONG; 1583 } 1584 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) { 1585 /* setup Ultra/66 */ 1586 if (drvp->UDMA_mode > 2 && 1587 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 1588 drvp->UDMA_mode = 2; 1589 if (drvp->UDMA_mode > 2) 1590 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 1591 else 1592 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 1593 } 1594 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1595 (drvp->drive_flags & DRIVE_UDMA)) { 1596 /* use Ultra/DMA */ 1597 drvp->drive_flags &= ~DRIVE_DMA; 1598 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 1599 udmareg |= PIIX_UDMATIM_SET( 1600 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 1601 } else { 1602 /* use Multiword DMA */ 1603 drvp->drive_flags &= ~DRIVE_UDMA; 1604 if (drive == 0) { 1605 idetim |= piix_setup_idetim_timings( 1606 drvp->DMA_mode, 1, channel); 1607 } else { 1608 sidetim |= piix_setup_sidetim_timings( 1609 drvp->DMA_mode, 1, channel); 1610 idetim =PIIX_IDETIM_SET(idetim, 1611 PIIX_IDETIM_SITRE, channel); 1612 } 1613 } 1614 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1615 1616 pio: /* use PIO mode */ 1617 idetim |= piix_setup_idetim_drvs(drvp); 1618 if (drive == 0) { 1619 idetim |= piix_setup_idetim_timings( 
1620 drvp->PIO_mode, 0, channel); 1621 } else { 1622 sidetim |= piix_setup_sidetim_timings( 1623 drvp->PIO_mode, 0, channel); 1624 idetim =PIIX_IDETIM_SET(idetim, 1625 PIIX_IDETIM_SITRE, channel); 1626 } 1627 } 1628 if (idedma_ctl != 0) { 1629 /* Add software bits in status register */ 1630 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1631 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel), 1632 idedma_ctl); 1633 } 1634 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 1635 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 1636 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 1637 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 1638 pciide_print_modes(cp); 1639 } 1640 1641 1642 /* setup ISP and RTC fields, based on mode */ 1643 static u_int32_t 1644 piix_setup_idetim_timings(mode, dma, channel) 1645 u_int8_t mode; 1646 u_int8_t dma; 1647 u_int8_t channel; 1648 { 1649 1650 if (dma) 1651 return PIIX_IDETIM_SET(0, 1652 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 1653 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 1654 channel); 1655 else 1656 return PIIX_IDETIM_SET(0, 1657 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 1658 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 1659 channel); 1660 } 1661 1662 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 1663 static u_int32_t 1664 piix_setup_idetim_drvs(drvp) 1665 struct ata_drive_datas *drvp; 1666 { 1667 u_int32_t ret = 0; 1668 struct channel_softc *chp = drvp->chnl_softc; 1669 u_int8_t channel = chp->channel; 1670 u_int8_t drive = drvp->drive; 1671 1672 /* 1673 * If drive is using UDMA, timings setups are independant 1674 * So just check DMA and PIO here. 1675 */ 1676 if (drvp->drive_flags & DRIVE_DMA) { 1677 /* if mode = DMA mode 0, use compatible timings */ 1678 if ((drvp->drive_flags & DRIVE_DMA) && 1679 drvp->DMA_mode == 0) { 1680 drvp->PIO_mode = 0; 1681 return ret; 1682 } 1683 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 1684 /* 1685 * PIO and DMA timings are the same, use fast timings for PIO 1686 * too, else use compat timings. 1687 */ 1688 if ((piix_isp_pio[drvp->PIO_mode] != 1689 piix_isp_dma[drvp->DMA_mode]) || 1690 (piix_rtc_pio[drvp->PIO_mode] != 1691 piix_rtc_dma[drvp->DMA_mode])) 1692 drvp->PIO_mode = 0; 1693 /* if PIO mode <= 2, use compat timings for PIO */ 1694 if (drvp->PIO_mode <= 2) { 1695 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 1696 channel); 1697 return ret; 1698 } 1699 } 1700 1701 /* 1702 * Now setup PIO modes. If mode < 2, use compat timings. 1703 * Else enable fast timings. Enable IORDY and prefetch/post 1704 * if PIO mode >= 3. 
1705 */ 1706 1707 if (drvp->PIO_mode < 2) 1708 return ret; 1709 1710 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 1711 if (drvp->PIO_mode >= 3) { 1712 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 1713 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 1714 } 1715 return ret; 1716 } 1717 1718 /* setup values in SIDETIM registers, based on mode */ 1719 static u_int32_t 1720 piix_setup_sidetim_timings(mode, dma, channel) 1721 u_int8_t mode; 1722 u_int8_t dma; 1723 u_int8_t channel; 1724 { 1725 if (dma) 1726 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 1727 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel); 1728 else 1729 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 1730 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel); 1731 } 1732 1733 void 1734 amd756_chip_map(sc, pa) 1735 struct pciide_softc *sc; 1736 struct pci_attach_args *pa; 1737 { 1738 struct pciide_channel *cp; 1739 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1740 int channel; 1741 pcireg_t chanenable; 1742 bus_size_t cmdsize, ctlsize; 1743 1744 if (pciide_chipen(sc, pa) == 0) 1745 return; 1746 printf("%s: bus-master DMA support present", 1747 sc->sc_wdcdev.sc_dev.dv_xname); 1748 pciide_mapreg_dma(sc, pa); 1749 printf("\n"); 1750 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1751 WDC_CAPABILITY_MODE; 1752 if (sc->sc_dma_ok) { 1753 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 1754 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 1755 sc->sc_wdcdev.irqack = pciide_irqack; 1756 } 1757 sc->sc_wdcdev.PIO_cap = 4; 1758 sc->sc_wdcdev.DMA_cap = 2; 1759 sc->sc_wdcdev.UDMA_cap = 4; 1760 sc->sc_wdcdev.set_modes = amd756_setup_channel; 1761 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1762 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1763 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 1764 1765 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable), 1766 DEBUG_PROBE); 1767 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1768 cp = &sc->pciide_channels[channel]; 1769 if (pciide_chansetup(sc, channel, interface) == 0) 1770 continue; 1771 1772 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 1773 printf("%s: %s channel ignored (disabled)\n", 1774 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1775 continue; 1776 } 1777 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 1778 pciide_pci_intr); 1779 1780 if (pciide_chan_candisable(cp)) 1781 chanenable &= ~AMD756_CHAN_EN(channel); 1782 pciide_map_compat_intr(pa, cp, channel, interface); 1783 if (cp->hw_ok == 0) 1784 continue; 1785 1786 amd756_setup_channel(&cp->wdc_channel); 1787 } 1788 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 1789 chanenable); 1790 return; 1791 } 1792 1793 void 1794 amd756_setup_channel(chp) 1795 struct channel_softc *chp; 1796 { 1797 u_int32_t udmatim_reg, datatim_reg; 1798 u_int8_t idedma_ctl; 1799 int mode, drive; 1800 struct ata_drive_datas *drvp; 1801 struct pciide_channel *cp = (struct pciide_channel*)chp; 1802 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1803 #ifndef PCIIDE_AMD756_ENABLEDMA 1804 int rev = PCI_REVISION( 1805 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 1806 #endif 1807 1808 idedma_ctl = 0; 1809 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 1810 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 1811 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 1812 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 1813 
1814 /* setup DMA if needed */ 1815 pciide_channel_dma_setup(cp); 1816 1817 for (drive = 0; drive < 2; drive++) { 1818 drvp = &chp->ch_drive[drive]; 1819 /* If no drive, skip */ 1820 if ((drvp->drive_flags & DRIVE) == 0) 1821 continue; 1822 /* add timing values, setup DMA if needed */ 1823 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1824 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 1825 mode = drvp->PIO_mode; 1826 goto pio; 1827 } 1828 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1829 (drvp->drive_flags & DRIVE_UDMA)) { 1830 /* use Ultra/DMA */ 1831 drvp->drive_flags &= ~DRIVE_DMA; 1832 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 1833 AMD756_UDMA_EN_MTH(chp->channel, drive) | 1834 AMD756_UDMA_TIME(chp->channel, drive, 1835 amd756_udma_tim[drvp->UDMA_mode]); 1836 /* can use PIO timings, MW DMA unused */ 1837 mode = drvp->PIO_mode; 1838 } else { 1839 /* use Multiword DMA, but only if revision is OK */ 1840 drvp->drive_flags &= ~DRIVE_UDMA; 1841 #ifndef PCIIDE_AMD756_ENABLEDMA 1842 /* 1843 * The workaround doesn't seem to be necessary 1844 * with all drives, so it can be disabled by 1845 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 1846 * triggered. 1847 */ 1848 if (AMD756_CHIPREV_DISABLEDMA(rev)) { 1849 printf("%s:%d:%d: multi-word DMA disabled due " 1850 "to chip revision\n", 1851 sc->sc_wdcdev.sc_dev.dv_xname, 1852 chp->channel, drive); 1853 mode = drvp->PIO_mode; 1854 drvp->drive_flags &= ~DRIVE_DMA; 1855 goto pio; 1856 } 1857 #endif 1858 /* mode = min(pio, dma+2) */ 1859 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 1860 mode = drvp->PIO_mode; 1861 else 1862 mode = drvp->DMA_mode + 2; 1863 } 1864 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1865 1866 pio: /* setup PIO mode */ 1867 if (mode <= 2) { 1868 drvp->DMA_mode = 0; 1869 drvp->PIO_mode = 0; 1870 mode = 0; 1871 } else { 1872 drvp->PIO_mode = mode; 1873 drvp->DMA_mode = mode - 2; 1874 } 1875 datatim_reg |= 1876 AMD756_DATATIM_PULSE(chp->channel, drive, 1877 amd756_pio_set[mode]) | 1878 AMD756_DATATIM_RECOV(chp->channel, drive, 1879 amd756_pio_rec[mode]); 1880 } 1881 if (idedma_ctl != 0) { 1882 /* Add software bits in status register */ 1883 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1884 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 1885 idedma_ctl); 1886 } 1887 pciide_print_modes(cp); 1888 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 1889 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 1890 } 1891 1892 void 1893 apollo_chip_map(sc, pa) 1894 struct pciide_softc *sc; 1895 struct pci_attach_args *pa; 1896 { 1897 struct pciide_channel *cp; 1898 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1899 int rev = PCI_REVISION(pa->pa_class); 1900 int channel; 1901 u_int32_t ideconf; 1902 bus_size_t cmdsize, ctlsize; 1903 1904 if (pciide_chipen(sc, pa) == 0) 1905 return; 1906 printf("%s: bus-master DMA support present", 1907 sc->sc_wdcdev.sc_dev.dv_xname); 1908 pciide_mapreg_dma(sc, pa); 1909 printf("\n"); 1910 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1911 WDC_CAPABILITY_MODE; 1912 if (sc->sc_dma_ok) { 1913 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1914 sc->sc_wdcdev.irqack = pciide_irqack; 1915 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE 1916 && rev >= 6) 1917 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 1918 } 1919 sc->sc_wdcdev.PIO_cap = 4; 1920 sc->sc_wdcdev.DMA_cap = 2; 1921 sc->sc_wdcdev.UDMA_cap = 2; 1922 sc->sc_wdcdev.set_modes = apollo_setup_channel; 1923 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1924 
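	/*
	 * Capability summary handed to the generic wdc code: PIO 0-4,
	 * multiword DMA 0-2 and, when the bus-master unit is usable on a
	 * VT82C586A rev >= 6, Ultra/DMA 0-2.  apollo_setup_channel() is the
	 * set_modes hook that turns the negotiated modes into APO_DATATIM
	 * and APO_UDMA register values for each channel below.
	 */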
sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1925 1926 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 1927 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 1928 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 1929 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 1930 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 1931 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 1932 DEBUG_PROBE); 1933 1934 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1935 cp = &sc->pciide_channels[channel]; 1936 if (pciide_chansetup(sc, channel, interface) == 0) 1937 continue; 1938 1939 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 1940 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 1941 printf("%s: %s channel ignored (disabled)\n", 1942 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1943 continue; 1944 } 1945 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 1946 pciide_pci_intr); 1947 if (cp->hw_ok == 0) 1948 continue; 1949 if (pciide_chan_candisable(cp)) { 1950 ideconf &= ~APO_IDECONF_EN(channel); 1951 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 1952 ideconf); 1953 } 1954 pciide_map_compat_intr(pa, cp, channel, interface); 1955 1956 if (cp->hw_ok == 0) 1957 continue; 1958 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 1959 } 1960 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 1961 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 1962 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 1963 } 1964 1965 void 1966 apollo_setup_channel(chp) 1967 struct channel_softc *chp; 1968 { 1969 u_int32_t udmatim_reg, datatim_reg; 1970 u_int8_t idedma_ctl; 1971 int mode, drive; 1972 struct ata_drive_datas *drvp; 1973 struct pciide_channel *cp = (struct pciide_channel*)chp; 1974 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1975 1976 idedma_ctl = 0; 1977 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 1978 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 1979 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 1980 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 1981 1982 /* setup DMA if needed */ 1983 pciide_channel_dma_setup(cp); 1984 1985 for (drive = 0; drive < 2; drive++) { 1986 drvp = &chp->ch_drive[drive]; 1987 /* If no drive, skip */ 1988 if ((drvp->drive_flags & DRIVE) == 0) 1989 continue; 1990 /* add timing values, setup DMA if needed */ 1991 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1992 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 1993 mode = drvp->PIO_mode; 1994 goto pio; 1995 } 1996 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1997 (drvp->drive_flags & DRIVE_UDMA)) { 1998 /* use Ultra/DMA */ 1999 drvp->drive_flags &= ~DRIVE_DMA; 2000 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 2001 APO_UDMA_EN_MTH(chp->channel, drive) | 2002 APO_UDMA_TIME(chp->channel, drive, 2003 apollo_udma_tim[drvp->UDMA_mode]); 2004 /* can use PIO timings, MW DMA unused */ 2005 mode = drvp->PIO_mode; 2006 } else { 2007 /* use Multiword DMA */ 2008 drvp->drive_flags &= ~DRIVE_UDMA; 2009 /* mode = min(pio, dma+2) */ 2010 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2011 mode = drvp->PIO_mode; 2012 else 2013 mode = drvp->DMA_mode + 2; 2014 } 2015 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2016 2017 pio: /* setup PIO mode */ 2018 if (mode <= 2) { 2019 drvp->DMA_mode = 0; 2020 drvp->PIO_mode = 0; 2021 mode = 0; 2022 } else { 2023 drvp->PIO_mode = mode; 2024 drvp->DMA_mode = mode - 2; 2025 } 2026 datatim_reg |= 2027 APO_DATATIM_PULSE(chp->channel, drive, 2028 apollo_pio_set[mode]) | 
2029 APO_DATATIM_RECOV(chp->channel, drive, 2030 apollo_pio_rec[mode]); 2031 } 2032 if (idedma_ctl != 0) { 2033 /* Add software bits in status register */ 2034 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2035 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2036 idedma_ctl); 2037 } 2038 pciide_print_modes(cp); 2039 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 2040 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 2041 } 2042 2043 void 2044 cmd_channel_map(pa, sc, channel) 2045 struct pci_attach_args *pa; 2046 struct pciide_softc *sc; 2047 int channel; 2048 { 2049 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2050 bus_size_t cmdsize, ctlsize; 2051 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 2052 int interface; 2053 2054 /* 2055 * The 0648/0649 can be told to identify as a RAID controller. 2056 * In this case, we have to fake interface 2057 */ 2058 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2059 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2060 PCIIDE_INTERFACE_SETTABLE(1); 2061 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2062 CMD_CONF_DSA1) 2063 interface |= PCIIDE_INTERFACE_PCI(0) | 2064 PCIIDE_INTERFACE_PCI(1); 2065 } else { 2066 interface = PCI_INTERFACE(pa->pa_class); 2067 } 2068 2069 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2070 cp->name = PCIIDE_CHANNEL_NAME(channel); 2071 cp->wdc_channel.channel = channel; 2072 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2073 2074 if (channel > 0) { 2075 cp->wdc_channel.ch_queue = 2076 sc->pciide_channels[0].wdc_channel.ch_queue; 2077 } else { 2078 cp->wdc_channel.ch_queue = 2079 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2080 } 2081 if (cp->wdc_channel.ch_queue == NULL) { 2082 printf("%s %s channel: " 2083 "can't allocate memory for command queue", 2084 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2085 return; 2086 } 2087 2088 printf("%s: %s channel %s to %s mode\n", 2089 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2090 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 2091 "configured" : "wired", 2092 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 2093 "native-PCI" : "compatibility"); 2094 2095 /* 2096 * with a CMD PCI64x, if we get here, the first channel is enabled: 2097 * there's no way to disable the first channel without disabling 2098 * the whole device 2099 */ 2100 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 2101 printf("%s: %s channel ignored (disabled)\n", 2102 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2103 return; 2104 } 2105 2106 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 2107 if (cp->hw_ok == 0) 2108 return; 2109 if (channel == 1) { 2110 if (pciide_chan_candisable(cp)) { 2111 ctrl &= ~CMD_CTRL_2PORT; 2112 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2113 CMD_CTRL, ctrl); 2114 } 2115 } 2116 pciide_map_compat_intr(pa, cp, channel, interface); 2117 } 2118 2119 int 2120 cmd_pci_intr(arg) 2121 void *arg; 2122 { 2123 struct pciide_softc *sc = arg; 2124 struct pciide_channel *cp; 2125 struct channel_softc *wdc_cp; 2126 int i, rv, crv; 2127 u_int32_t priirq, secirq; 2128 2129 rv = 0; 2130 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2131 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2132 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2133 cp = &sc->pciide_channels[i]; 2134 wdc_cp = &cp->wdc_channel; 2135 /* If a compat channel skip. 
*/ 2136 if (cp->compat) 2137 continue; 2138 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 2139 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 2140 crv = wdcintr(wdc_cp); 2141 if (crv == 0) 2142 printf("%s:%d: bogus intr\n", 2143 sc->sc_wdcdev.sc_dev.dv_xname, i); 2144 else 2145 rv = 1; 2146 } 2147 } 2148 return rv; 2149 } 2150 2151 void 2152 cmd_chip_map(sc, pa) 2153 struct pciide_softc *sc; 2154 struct pci_attach_args *pa; 2155 { 2156 int channel; 2157 2158 /* 2159 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2160 * and base adresses registers can be disabled at 2161 * hardware level. In this case, the device is wired 2162 * in compat mode and its first channel is always enabled, 2163 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2164 * In fact, it seems that the first channel of the CMD PCI0640 2165 * can't be disabled. 2166 */ 2167 2168 #ifdef PCIIDE_CMD064x_DISABLE 2169 if (pciide_chipen(sc, pa) == 0) 2170 return; 2171 #endif 2172 2173 printf("%s: hardware does not support DMA\n", 2174 sc->sc_wdcdev.sc_dev.dv_xname); 2175 sc->sc_dma_ok = 0; 2176 2177 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2178 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2179 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 2180 2181 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2182 cmd_channel_map(pa, sc, channel); 2183 } 2184 } 2185 2186 void 2187 cmd0643_9_chip_map(sc, pa) 2188 struct pciide_softc *sc; 2189 struct pci_attach_args *pa; 2190 { 2191 struct pciide_channel *cp; 2192 int channel; 2193 int rev = PCI_REVISION( 2194 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 2195 2196 /* 2197 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2198 * and base adresses registers can be disabled at 2199 * hardware level. In this case, the device is wired 2200 * in compat mode and its first channel is always enabled, 2201 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2202 * In fact, it seems that the first channel of the CMD PCI0640 2203 * can't be disabled. 2204 */ 2205 2206 #ifdef PCIIDE_CMD064x_DISABLE 2207 if (pciide_chipen(sc, pa) == 0) 2208 return; 2209 #endif 2210 printf("%s: bus-master DMA support present", 2211 sc->sc_wdcdev.sc_dev.dv_xname); 2212 pciide_mapreg_dma(sc, pa); 2213 printf("\n"); 2214 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2215 WDC_CAPABILITY_MODE; 2216 if (sc->sc_dma_ok) { 2217 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2218 switch (sc->sc_pp->ide_product) { 2219 case PCI_PRODUCT_CMDTECH_649: 2220 case PCI_PRODUCT_CMDTECH_648: 2221 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2222 sc->sc_wdcdev.UDMA_cap = 4; 2223 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2224 break; 2225 case PCI_PRODUCT_CMDTECH_646: 2226 if (rev >= CMD0646U2_REV) { 2227 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2228 sc->sc_wdcdev.UDMA_cap = 2; 2229 } else if (rev >= CMD0646U_REV) { 2230 /* 2231 * Linux's driver claims that the 646U is broken 2232 * with UDMA. 
Only enable it if we know what we're 2233 * doing 2234 */ 2235 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 2236 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2237 sc->sc_wdcdev.UDMA_cap = 2; 2238 #endif 2239 /* explicitely disable UDMA */ 2240 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2241 CMD_UDMATIM(0), 0); 2242 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2243 CMD_UDMATIM(1), 0); 2244 } 2245 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2246 break; 2247 default: 2248 sc->sc_wdcdev.irqack = pciide_irqack; 2249 } 2250 } 2251 2252 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2253 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2254 sc->sc_wdcdev.PIO_cap = 4; 2255 sc->sc_wdcdev.DMA_cap = 2; 2256 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 2257 2258 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 2259 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2260 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2261 DEBUG_PROBE); 2262 2263 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2264 cp = &sc->pciide_channels[channel]; 2265 cmd_channel_map(pa, sc, channel); 2266 if (cp->hw_ok == 0) 2267 continue; 2268 cmd0643_9_setup_channel(&cp->wdc_channel); 2269 } 2270 /* 2271 * note - this also makes sure we clear the irq disable and reset 2272 * bits 2273 */ 2274 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 2275 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 2276 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2277 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2278 DEBUG_PROBE); 2279 } 2280 2281 void 2282 cmd0643_9_setup_channel(chp) 2283 struct channel_softc *chp; 2284 { 2285 struct ata_drive_datas *drvp; 2286 u_int8_t tim; 2287 u_int32_t idedma_ctl, udma_reg; 2288 int drive; 2289 struct pciide_channel *cp = (struct pciide_channel*)chp; 2290 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2291 2292 idedma_ctl = 0; 2293 /* setup DMA if needed */ 2294 pciide_channel_dma_setup(cp); 2295 2296 for (drive = 0; drive < 2; drive++) { 2297 drvp = &chp->ch_drive[drive]; 2298 /* If no drive, skip */ 2299 if ((drvp->drive_flags & DRIVE) == 0) 2300 continue; 2301 /* add timing values, setup DMA if needed */ 2302 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 2303 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 2304 if (drvp->drive_flags & DRIVE_UDMA) { 2305 /* UltraDMA on a 646U2, 0648 or 0649 */ 2306 udma_reg = pciide_pci_read(sc->sc_pc, 2307 sc->sc_tag, CMD_UDMATIM(chp->channel)); 2308 if (drvp->UDMA_mode > 2 && 2309 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 2310 CMD_BICSR) & 2311 CMD_BICSR_80(chp->channel)) == 0) 2312 drvp->UDMA_mode = 2; 2313 if (drvp->UDMA_mode > 2) 2314 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 2315 else if (sc->sc_wdcdev.UDMA_cap > 2) 2316 udma_reg |= CMD_UDMATIM_UDMA33(drive); 2317 udma_reg |= CMD_UDMATIM_UDMA(drive); 2318 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 2319 CMD_UDMATIM_TIM_OFF(drive)); 2320 udma_reg |= 2321 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 2322 CMD_UDMATIM_TIM_OFF(drive)); 2323 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2324 CMD_UDMATIM(chp->channel), udma_reg); 2325 } else { 2326 /* 2327 * use Multiword DMA. 
2328 * Timings will be used for both PIO and DMA, 2329 * so adjust DMA mode if needed 2330 * if we have a 0646U2/8/9, turn off UDMA 2331 */ 2332 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2333 udma_reg = pciide_pci_read(sc->sc_pc, 2334 sc->sc_tag, 2335 CMD_UDMATIM(chp->channel)); 2336 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 2337 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2338 CMD_UDMATIM(chp->channel), 2339 udma_reg); 2340 } 2341 if (drvp->PIO_mode >= 3 && 2342 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 2343 drvp->DMA_mode = drvp->PIO_mode - 2; 2344 } 2345 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 2346 } 2347 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2348 } 2349 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2350 CMD_DATA_TIM(chp->channel, drive), tim); 2351 } 2352 if (idedma_ctl != 0) { 2353 /* Add software bits in status register */ 2354 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2355 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2356 idedma_ctl); 2357 } 2358 pciide_print_modes(cp); 2359 } 2360 2361 void 2362 cmd646_9_irqack(chp) 2363 struct channel_softc *chp; 2364 { 2365 u_int32_t priirq, secirq; 2366 struct pciide_channel *cp = (struct pciide_channel*)chp; 2367 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2368 2369 if (chp->channel == 0) { 2370 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2371 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 2372 } else { 2373 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2374 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 2375 } 2376 pciide_irqack(chp); 2377 } 2378 2379 void 2380 cy693_chip_map(sc, pa) 2381 struct pciide_softc *sc; 2382 struct pci_attach_args *pa; 2383 { 2384 struct pciide_channel *cp; 2385 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2386 bus_size_t cmdsize, ctlsize; 2387 2388 if (pciide_chipen(sc, pa) == 0) 2389 return; 2390 /* 2391 * this chip has 2 PCI IDE functions, one for primary and one for 2392 * secondary. 
So we need to call pciide_mapregs_compat() with 2393 * the real channel 2394 */ 2395 if (pa->pa_function == 1) { 2396 sc->sc_cy_compatchan = 0; 2397 } else if (pa->pa_function == 2) { 2398 sc->sc_cy_compatchan = 1; 2399 } else { 2400 printf("%s: unexpected PCI function %d\n", 2401 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2402 return; 2403 } 2404 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2405 printf("%s: bus-master DMA support present", 2406 sc->sc_wdcdev.sc_dev.dv_xname); 2407 pciide_mapreg_dma(sc, pa); 2408 } else { 2409 printf("%s: hardware does not support DMA", 2410 sc->sc_wdcdev.sc_dev.dv_xname); 2411 sc->sc_dma_ok = 0; 2412 } 2413 printf("\n"); 2414 2415 sc->sc_cy_handle = cy82c693_init(pa->pa_iot); 2416 if (sc->sc_cy_handle == NULL) { 2417 printf("%s: unable to map hyperCache control registers\n", 2418 sc->sc_wdcdev.sc_dev.dv_xname); 2419 sc->sc_dma_ok = 0; 2420 } 2421 2422 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2423 WDC_CAPABILITY_MODE; 2424 if (sc->sc_dma_ok) { 2425 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2426 sc->sc_wdcdev.irqack = pciide_irqack; 2427 } 2428 sc->sc_wdcdev.PIO_cap = 4; 2429 sc->sc_wdcdev.DMA_cap = 2; 2430 sc->sc_wdcdev.set_modes = cy693_setup_channel; 2431 2432 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2433 sc->sc_wdcdev.nchannels = 1; 2434 2435 /* Only one channel for this chip; if we are here it's enabled */ 2436 cp = &sc->pciide_channels[0]; 2437 sc->wdc_chanarray[0] = &cp->wdc_channel; 2438 cp->name = PCIIDE_CHANNEL_NAME(0); 2439 cp->wdc_channel.channel = 0; 2440 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2441 cp->wdc_channel.ch_queue = 2442 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2443 if (cp->wdc_channel.ch_queue == NULL) { 2444 printf("%s primary channel: " 2445 "can't allocate memory for command queue", 2446 sc->sc_wdcdev.sc_dev.dv_xname); 2447 return; 2448 } 2449 printf("%s: primary channel %s to ", 2450 sc->sc_wdcdev.sc_dev.dv_xname, 2451 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 
2452 "configured" : "wired"); 2453 if (interface & PCIIDE_INTERFACE_PCI(0)) { 2454 printf("native-PCI"); 2455 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 2456 pciide_pci_intr); 2457 } else { 2458 printf("compatibility"); 2459 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, 2460 &cmdsize, &ctlsize); 2461 } 2462 printf(" mode\n"); 2463 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2464 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2465 wdcattach(&cp->wdc_channel); 2466 if (pciide_chan_candisable(cp)) { 2467 pci_conf_write(sc->sc_pc, sc->sc_tag, 2468 PCI_COMMAND_STATUS_REG, 0); 2469 } 2470 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface); 2471 if (cp->hw_ok == 0) 2472 return; 2473 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 2474 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE); 2475 cy693_setup_channel(&cp->wdc_channel); 2476 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 2477 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 2478 } 2479 2480 void 2481 cy693_setup_channel(chp) 2482 struct channel_softc *chp; 2483 { 2484 struct ata_drive_datas *drvp; 2485 int drive; 2486 u_int32_t cy_cmd_ctrl; 2487 u_int32_t idedma_ctl; 2488 struct pciide_channel *cp = (struct pciide_channel*)chp; 2489 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2490 int dma_mode = -1; 2491 2492 cy_cmd_ctrl = idedma_ctl = 0; 2493 2494 /* setup DMA if needed */ 2495 pciide_channel_dma_setup(cp); 2496 2497 for (drive = 0; drive < 2; drive++) { 2498 drvp = &chp->ch_drive[drive]; 2499 /* If no drive, skip */ 2500 if ((drvp->drive_flags & DRIVE) == 0) 2501 continue; 2502 /* add timing values, setup DMA if needed */ 2503 if (drvp->drive_flags & DRIVE_DMA) { 2504 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2505 /* use Multiword DMA */ 2506 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 2507 dma_mode = drvp->DMA_mode; 2508 } 2509 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2510 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 2511 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2512 CY_CMD_CTRL_IOW_REC_OFF(drive)); 2513 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2514 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 2515 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2516 CY_CMD_CTRL_IOR_REC_OFF(drive)); 2517 } 2518 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 2519 chp->ch_drive[0].DMA_mode = dma_mode; 2520 chp->ch_drive[1].DMA_mode = dma_mode; 2521 2522 if (dma_mode == -1) 2523 dma_mode = 0; 2524 2525 if (sc->sc_cy_handle != NULL) { 2526 /* Note: `multiple' is implied. */ 2527 cy82c693_write(sc->sc_cy_handle, 2528 (sc->sc_cy_compatchan == 0) ? 
2529 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 2530 } 2531 2532 pciide_print_modes(cp); 2533 2534 if (idedma_ctl != 0) { 2535 /* Add software bits in status register */ 2536 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2537 IDEDMA_CTL, idedma_ctl); 2538 } 2539 } 2540 2541 void 2542 sis_chip_map(sc, pa) 2543 struct pciide_softc *sc; 2544 struct pci_attach_args *pa; 2545 { 2546 struct pciide_channel *cp; 2547 int channel; 2548 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 2549 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2550 pcireg_t rev = PCI_REVISION(pa->pa_class); 2551 bus_size_t cmdsize, ctlsize; 2552 2553 if (pciide_chipen(sc, pa) == 0) 2554 return; 2555 printf("%s: bus-master DMA support present", 2556 sc->sc_wdcdev.sc_dev.dv_xname); 2557 pciide_mapreg_dma(sc, pa); 2558 printf("\n"); 2559 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2560 WDC_CAPABILITY_MODE; 2561 if (sc->sc_dma_ok) { 2562 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2563 sc->sc_wdcdev.irqack = pciide_irqack; 2564 if (rev > 0xd0) 2565 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2566 } 2567 2568 sc->sc_wdcdev.PIO_cap = 4; 2569 sc->sc_wdcdev.DMA_cap = 2; 2570 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) 2571 sc->sc_wdcdev.UDMA_cap = 2; 2572 sc->sc_wdcdev.set_modes = sis_setup_channel; 2573 2574 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2575 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2576 2577 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 2578 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 2579 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE); 2580 2581 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2582 cp = &sc->pciide_channels[channel]; 2583 if (pciide_chansetup(sc, channel, interface) == 0) 2584 continue; 2585 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 2586 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 2587 printf("%s: %s channel ignored (disabled)\n", 2588 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2589 continue; 2590 } 2591 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2592 pciide_pci_intr); 2593 if (cp->hw_ok == 0) 2594 continue; 2595 if (pciide_chan_candisable(cp)) { 2596 if (channel == 0) 2597 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 2598 else 2599 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 2600 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 2601 sis_ctr0); 2602 } 2603 pciide_map_compat_intr(pa, cp, channel, interface); 2604 if (cp->hw_ok == 0) 2605 continue; 2606 sis_setup_channel(&cp->wdc_channel); 2607 } 2608 } 2609 2610 void 2611 sis_setup_channel(chp) 2612 struct channel_softc *chp; 2613 { 2614 struct ata_drive_datas *drvp; 2615 int drive; 2616 u_int32_t sis_tim; 2617 u_int32_t idedma_ctl; 2618 struct pciide_channel *cp = (struct pciide_channel*)chp; 2619 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2620 2621 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 2622 "channel %d 0x%x\n", chp->channel, 2623 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 2624 DEBUG_PROBE); 2625 sis_tim = 0; 2626 idedma_ctl = 0; 2627 /* setup DMA if needed */ 2628 pciide_channel_dma_setup(cp); 2629 2630 for (drive = 0; drive < 2; drive++) { 2631 drvp = &chp->ch_drive[drive]; 2632 /* If no drive, skip */ 2633 if ((drvp->drive_flags & DRIVE) == 0) 2634 continue; 2635 /* add timing values, setup DMA if needed */ 2636 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 2637 (drvp->drive_flags & DRIVE_UDMA) == 0) 2638 goto pio; 2639 2640 if (drvp->drive_flags & 
DRIVE_UDMA) { 2641 /* use Ultra/DMA */ 2642 drvp->drive_flags &= ~DRIVE_DMA; 2643 sis_tim |= sis_udma_tim[drvp->UDMA_mode] << 2644 SIS_TIM_UDMA_TIME_OFF(drive); 2645 sis_tim |= SIS_TIM_UDMA_EN(drive); 2646 } else { 2647 /* 2648 * use Multiword DMA 2649 * Timings will be used for both PIO and DMA, 2650 * so adjust DMA mode if needed 2651 */ 2652 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 2653 drvp->PIO_mode = drvp->DMA_mode + 2; 2654 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 2655 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 2656 drvp->PIO_mode - 2 : 0; 2657 if (drvp->DMA_mode == 0) 2658 drvp->PIO_mode = 0; 2659 } 2660 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2661 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] << 2662 SIS_TIM_ACT_OFF(drive); 2663 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 2664 SIS_TIM_REC_OFF(drive); 2665 } 2666 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 2667 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 2668 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 2669 if (idedma_ctl != 0) { 2670 /* Add software bits in status register */ 2671 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2672 IDEDMA_CTL, idedma_ctl); 2673 } 2674 pciide_print_modes(cp); 2675 } 2676 2677 void 2678 acer_chip_map(sc, pa) 2679 struct pciide_softc *sc; 2680 struct pci_attach_args *pa; 2681 { 2682 struct pciide_channel *cp; 2683 int channel; 2684 pcireg_t cr, interface; 2685 bus_size_t cmdsize, ctlsize; 2686 2687 if (pciide_chipen(sc, pa) == 0) 2688 return; 2689 printf("%s: bus-master DMA support present", 2690 sc->sc_wdcdev.sc_dev.dv_xname); 2691 pciide_mapreg_dma(sc, pa); 2692 printf("\n"); 2693 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2694 WDC_CAPABILITY_MODE; 2695 if (sc->sc_dma_ok) { 2696 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2697 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2698 sc->sc_wdcdev.irqack = pciide_irqack; 2699 } 2700 2701 sc->sc_wdcdev.PIO_cap = 4; 2702 sc->sc_wdcdev.DMA_cap = 2; 2703 sc->sc_wdcdev.UDMA_cap = 2; 2704 sc->sc_wdcdev.set_modes = acer_setup_channel; 2705 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2706 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2707 2708 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 2709 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 2710 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 2711 2712 /* Enable "microsoft register bits" R/W. 
*/ 2713 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 2714 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 2715 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 2716 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 2717 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 2718 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 2719 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 2720 ~ACER_CHANSTATUSREGS_RO); 2721 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 2722 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 2723 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 2724 /* Don't use cr, re-read the real register content instead */ 2725 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 2726 PCI_CLASS_REG)); 2727 2728 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2729 cp = &sc->pciide_channels[channel]; 2730 if (pciide_chansetup(sc, channel, interface) == 0) 2731 continue; 2732 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 2733 printf("%s: %s channel ignored (disabled)\n", 2734 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2735 continue; 2736 } 2737 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2738 acer_pci_intr); 2739 if (cp->hw_ok == 0) 2740 continue; 2741 if (pciide_chan_candisable(cp)) { 2742 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 2743 pci_conf_write(sc->sc_pc, sc->sc_tag, 2744 PCI_CLASS_REG, cr); 2745 } 2746 pciide_map_compat_intr(pa, cp, channel, interface); 2747 acer_setup_channel(&cp->wdc_channel); 2748 } 2749 } 2750 2751 void 2752 acer_setup_channel(chp) 2753 struct channel_softc *chp; 2754 { 2755 struct ata_drive_datas *drvp; 2756 int drive; 2757 u_int32_t acer_fifo_udma; 2758 u_int32_t idedma_ctl; 2759 struct pciide_channel *cp = (struct pciide_channel*)chp; 2760 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2761 2762 idedma_ctl = 0; 2763 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 2764 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 2765 acer_fifo_udma), DEBUG_PROBE); 2766 /* setup DMA if needed */ 2767 pciide_channel_dma_setup(cp); 2768 2769 for (drive = 0; drive < 2; drive++) { 2770 drvp = &chp->ch_drive[drive]; 2771 /* If no drive, skip */ 2772 if ((drvp->drive_flags & DRIVE) == 0) 2773 continue; 2774 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 2775 "channel %d drive %d 0x%x\n", chp->channel, drive, 2776 pciide_pci_read(sc->sc_pc, sc->sc_tag, 2777 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 2778 /* clear FIFO/DMA mode */ 2779 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 2780 ACER_UDMA_EN(chp->channel, drive) | 2781 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 2782 2783 /* add timing values, setup DMA if needed */ 2784 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 2785 (drvp->drive_flags & DRIVE_UDMA) == 0) { 2786 acer_fifo_udma |= 2787 ACER_FTH_OPL(chp->channel, drive, 0x1); 2788 goto pio; 2789 } 2790 2791 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 2792 if (drvp->drive_flags & DRIVE_UDMA) { 2793 /* use Ultra/DMA */ 2794 drvp->drive_flags &= ~DRIVE_DMA; 2795 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 2796 acer_fifo_udma |= 2797 ACER_UDMA_TIM(chp->channel, drive, 2798 acer_udma[drvp->UDMA_mode]); 2799 } else { 2800 /* 2801 * use Multiword DMA 2802 * Timings will be used for both PIO and DMA, 2803 * so adjust DMA mode if needed 2804 */ 2805 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 2806 drvp->PIO_mode = drvp->DMA_mode + 2; 2807 
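			/*
			 * Example: a drive reporting PIO 4 with multiword
			 * DMA 1 was just clamped to PIO 3, so both transfer
			 * types can share the single timing value; the tests
			 * below handle the opposite mismatch and fall back to
			 * compatible (mode 0) timings when only multiword
			 * DMA 0 remains.
			 */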
if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 2808 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 2809 drvp->PIO_mode - 2 : 0; 2810 if (drvp->DMA_mode == 0) 2811 drvp->PIO_mode = 0; 2812 } 2813 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2814 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 2815 ACER_IDETIM(chp->channel, drive), 2816 acer_pio[drvp->PIO_mode]); 2817 } 2818 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 2819 acer_fifo_udma), DEBUG_PROBE); 2820 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 2821 if (idedma_ctl != 0) { 2822 /* Add software bits in status register */ 2823 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2824 IDEDMA_CTL, idedma_ctl); 2825 } 2826 pciide_print_modes(cp); 2827 } 2828 2829 int 2830 acer_pci_intr(arg) 2831 void *arg; 2832 { 2833 struct pciide_softc *sc = arg; 2834 struct pciide_channel *cp; 2835 struct channel_softc *wdc_cp; 2836 int i, rv, crv; 2837 u_int32_t chids; 2838 2839 rv = 0; 2840 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 2841 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2842 cp = &sc->pciide_channels[i]; 2843 wdc_cp = &cp->wdc_channel; 2844 /* If a compat channel skip. */ 2845 if (cp->compat) 2846 continue; 2847 if (chids & ACER_CHIDS_INT(i)) { 2848 crv = wdcintr(wdc_cp); 2849 if (crv == 0) 2850 printf("%s:%d: bogus intr\n", 2851 sc->sc_wdcdev.sc_dev.dv_xname, i); 2852 else 2853 rv = 1; 2854 } 2855 } 2856 return rv; 2857 } 2858 2859 void 2860 hpt_chip_map(sc, pa) 2861 struct pciide_softc *sc; 2862 struct pci_attach_args *pa; 2863 { 2864 struct pciide_channel *cp; 2865 int i, compatchan, revision; 2866 pcireg_t interface; 2867 bus_size_t cmdsize, ctlsize; 2868 2869 if (pciide_chipen(sc, pa) == 0) 2870 return; 2871 revision = PCI_REVISION(pa->pa_class); 2872 2873 /* 2874 * when the chip is in native mode it identifies itself as a 2875 * 'misc mass storage'. Fake interface in this case. 2876 */ 2877 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 2878 interface = PCI_INTERFACE(pa->pa_class); 2879 } else { 2880 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2881 PCIIDE_INTERFACE_PCI(0); 2882 if (revision == HPT370_REV) 2883 interface |= PCIIDE_INTERFACE_PCI(1); 2884 } 2885 2886 printf("%s: bus-master DMA support present", 2887 sc->sc_wdcdev.sc_dev.dv_xname); 2888 pciide_mapreg_dma(sc, pa); 2889 printf("\n"); 2890 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2891 WDC_CAPABILITY_MODE; 2892 if (sc->sc_dma_ok) { 2893 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2894 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2895 sc->sc_wdcdev.irqack = pciide_irqack; 2896 } 2897 sc->sc_wdcdev.PIO_cap = 4; 2898 sc->sc_wdcdev.DMA_cap = 2; 2899 sc->sc_wdcdev.UDMA_cap = 4; 2900 2901 sc->sc_wdcdev.set_modes = hpt_setup_channel; 2902 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2903 if (revision == HPT366_REV) { 2904 /* 2905 * The 366 has 2 PCI IDE functions, one for primary and one 2906 * for secondary. 
So we need to call pciide_mapregs_compat() 2907 * with the real channel 2908 */ 2909 if (pa->pa_function == 0) { 2910 compatchan = 0; 2911 } else if (pa->pa_function == 1) { 2912 compatchan = 1; 2913 } else { 2914 printf("%s: unexpected PCI function %d\n", 2915 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2916 return; 2917 } 2918 sc->sc_wdcdev.nchannels = 1; 2919 } else { 2920 sc->sc_wdcdev.nchannels = 2; 2921 } 2922 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2923 cp = &sc->pciide_channels[i]; 2924 if (sc->sc_wdcdev.nchannels > 1) { 2925 compatchan = i; 2926 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 2927 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 2928 printf("%s: %s channel ignored (disabled)\n", 2929 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2930 continue; 2931 } 2932 } 2933 if (pciide_chansetup(sc, i, interface) == 0) 2934 continue; 2935 if (interface & PCIIDE_INTERFACE_PCI(i)) { 2936 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2937 &ctlsize, hpt_pci_intr); 2938 } else { 2939 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 2940 &cmdsize, &ctlsize); 2941 } 2942 if (cp->hw_ok == 0) 2943 return; 2944 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2945 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2946 wdcattach(&cp->wdc_channel); 2947 hpt_setup_channel(&cp->wdc_channel); 2948 } 2949 if (revision == HPT370_REV) { 2950 /* 2951 * HPT370_REV has a bit to disable interrupts, make sure 2952 * to clear it 2953 */ 2954 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 2955 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 2956 ~HPT_CSEL_IRQDIS); 2957 } 2958 return; 2959 } 2960 2961 2962 void 2963 hpt_setup_channel(chp) 2964 struct channel_softc *chp; 2965 { 2966 struct ata_drive_datas *drvp; 2967 int drive; 2968 int cable; 2969 u_int32_t before, after; 2970 u_int32_t idedma_ctl; 2971 struct pciide_channel *cp = (struct pciide_channel*)chp; 2972 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2973 2974 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 2975 2976 /* setup DMA if needed */ 2977 pciide_channel_dma_setup(cp); 2978 2979 idedma_ctl = 0; 2980 2981 /* Per drive settings */ 2982 for (drive = 0; drive < 2; drive++) { 2983 drvp = &chp->ch_drive[drive]; 2984 /* If no drive, skip */ 2985 if ((drvp->drive_flags & DRIVE) == 0) 2986 continue; 2987 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 2988 HPT_IDETIM(chp->channel, drive)); 2989 2990 /* add timing values, setup DMA if needed */ 2991 if (drvp->drive_flags & DRIVE_UDMA) { 2992 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 2993 drvp->UDMA_mode > 2) 2994 drvp->UDMA_mode = 2; 2995 after = (sc->sc_wdcdev.nchannels == 2) ? 2996 hpt370_udma[drvp->UDMA_mode] : 2997 hpt366_udma[drvp->UDMA_mode]; 2998 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2999 } else if (drvp->drive_flags & DRIVE_DMA) { 3000 /* 3001 * use Multiword DMA. 3002 * Timings will be used for both PIO and DMA, so adjust 3003 * DMA mode if needed 3004 */ 3005 if (drvp->PIO_mode >= 3 && 3006 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3007 drvp->DMA_mode = drvp->PIO_mode - 2; 3008 } 3009 after = (sc->sc_wdcdev.nchannels == 2) ? 3010 hpt370_dma[drvp->DMA_mode] : 3011 hpt366_dma[drvp->DMA_mode]; 3012 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3013 } else { 3014 /* PIO only */ 3015 after = (sc->sc_wdcdev.nchannels == 2) ? 
3016 hpt370_pio[drvp->PIO_mode] : 3017 hpt366_pio[drvp->PIO_mode]; 3018 } 3019 pci_conf_write(sc->sc_pc, sc->sc_tag, 3020 HPT_IDETIM(chp->channel, drive), after); 3021 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 3022 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname, 3023 after, before), DEBUG_PROBE); 3024 } 3025 if (idedma_ctl != 0) { 3026 /* Add software bits in status register */ 3027 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3028 IDEDMA_CTL, idedma_ctl); 3029 } 3030 pciide_print_modes(cp); 3031 } 3032 3033 int 3034 hpt_pci_intr(arg) 3035 void *arg; 3036 { 3037 struct pciide_softc *sc = arg; 3038 struct pciide_channel *cp; 3039 struct channel_softc *wdc_cp; 3040 int rv = 0; 3041 int dmastat, i, crv; 3042 3043 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3044 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3045 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 3046 if((dmastat & IDEDMA_CTL_INTR) == 0) 3047 continue; 3048 cp = &sc->pciide_channels[i]; 3049 wdc_cp = &cp->wdc_channel; 3050 crv = wdcintr(wdc_cp); 3051 if (crv == 0) { 3052 printf("%s:%d: bogus intr\n", 3053 sc->sc_wdcdev.sc_dev.dv_xname, i); 3054 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3055 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 3056 } else 3057 rv = 1; 3058 } 3059 return rv; 3060 } 3061 3062 3063 /* A macro to test product */ 3064 #define PDC_IS_262(sc) \ 3065 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \ 3066 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3067 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) 3068 3069 void 3070 pdc202xx_chip_map(sc, pa) 3071 struct pciide_softc *sc; 3072 struct pci_attach_args *pa; 3073 { 3074 struct pciide_channel *cp; 3075 int channel; 3076 pcireg_t interface, st, mode; 3077 bus_size_t cmdsize, ctlsize; 3078 3079 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3080 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st), 3081 DEBUG_PROBE); 3082 if (pciide_chipen(sc, pa) == 0) 3083 return; 3084 3085 /* turn off RAID mode */ 3086 st &= ~PDC2xx_STATE_IDERAID; 3087 3088 /* 3089 * can't rely on the PCI_CLASS_REG content if the chip was in raid 3090 * mode. 
We have to fake interface 3091 */ 3092 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 3093 if (st & PDC2xx_STATE_NATIVE) 3094 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3095 3096 printf("%s: bus-master DMA support present", 3097 sc->sc_wdcdev.sc_dev.dv_xname); 3098 pciide_mapreg_dma(sc, pa); 3099 printf("\n"); 3100 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3101 WDC_CAPABILITY_MODE; 3102 if (sc->sc_dma_ok) { 3103 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3104 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3105 sc->sc_wdcdev.irqack = pciide_irqack; 3106 } 3107 sc->sc_wdcdev.PIO_cap = 4; 3108 sc->sc_wdcdev.DMA_cap = 2; 3109 if (PDC_IS_262(sc)) 3110 sc->sc_wdcdev.UDMA_cap = 4; 3111 else 3112 sc->sc_wdcdev.UDMA_cap = 2; 3113 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel; 3114 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3115 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3116 3117 /* setup failsafe defaults */ 3118 mode = 0; 3119 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 3120 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 3121 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 3122 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 3123 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3124 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 " 3125 "initial timings 0x%x, now 0x%x\n", channel, 3126 pci_conf_read(sc->sc_pc, sc->sc_tag, 3127 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 3128 DEBUG_PROBE); 3129 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0), 3130 mode | PDC2xx_TIM_IORDYp); 3131 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 " 3132 "initial timings 0x%x, now 0x%x\n", channel, 3133 pci_conf_read(sc->sc_pc, sc->sc_tag, 3134 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 3135 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1), 3136 mode); 3137 } 3138 3139 mode = PDC2xx_SCR_DMA; 3140 if (PDC_IS_262(sc)) { 3141 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 3142 } else { 3143 /* the BIOS set it up this way */ 3144 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 3145 } 3146 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 3147 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 3148 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n", 3149 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode), 3150 DEBUG_PROBE); 3151 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode); 3152 3153 /* controller initial state register is OK even without BIOS */ 3154 /* Set DMA mode to IDE DMA compatibility */ 3155 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 3156 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ), 3157 DEBUG_PROBE); 3158 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 3159 mode | 0x1); 3160 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 3161 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 3162 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 3163 mode | 0x1); 3164 3165 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3166 cp = &sc->pciide_channels[channel]; 3167 if (pciide_chansetup(sc, channel, interface) == 0) 3168 continue; 3169 if ((st & (PDC_IS_262(sc) ? 
3170 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 3171 printf("%s: %s channel ignored (disabled)\n", 3172 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3173 continue; 3174 } 3175 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3176 pdc202xx_pci_intr); 3177 if (cp->hw_ok == 0) 3178 continue; 3179 if (pciide_chan_candisable(cp)) 3180 st &= ~(PDC_IS_262(sc) ? 3181 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 3182 pciide_map_compat_intr(pa, cp, channel, interface); 3183 pdc202xx_setup_channel(&cp->wdc_channel); 3184 } 3185 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st), 3186 DEBUG_PROBE); 3187 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 3188 return; 3189 } 3190 3191 void 3192 pdc202xx_setup_channel(chp) 3193 struct channel_softc *chp; 3194 { 3195 struct ata_drive_datas *drvp; 3196 int drive; 3197 pcireg_t mode, st; 3198 u_int32_t idedma_ctl, scr, atapi; 3199 struct pciide_channel *cp = (struct pciide_channel*)chp; 3200 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3201 int channel = chp->channel; 3202 3203 /* setup DMA if needed */ 3204 pciide_channel_dma_setup(cp); 3205 3206 idedma_ctl = 0; 3207 3208 /* Per channel settings */ 3209 if (PDC_IS_262(sc)) { 3210 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3211 PDC262_U66); 3212 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3213 /* Trimm UDMA mode */ 3214 if ((st & PDC262_STATE_80P(channel)) != 0 || 3215 (chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3216 chp->ch_drive[0].UDMA_mode <= 2) || 3217 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3218 chp->ch_drive[1].UDMA_mode <= 2)) { 3219 if (chp->ch_drive[0].UDMA_mode > 2) 3220 chp->ch_drive[0].UDMA_mode = 2; 3221 if (chp->ch_drive[1].UDMA_mode > 2) 3222 chp->ch_drive[1].UDMA_mode = 2; 3223 } 3224 /* Set U66 if needed */ 3225 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3226 chp->ch_drive[0].UDMA_mode > 2) || 3227 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3228 chp->ch_drive[1].UDMA_mode > 2)) 3229 scr |= PDC262_U66_EN(channel); 3230 else 3231 scr &= ~PDC262_U66_EN(channel); 3232 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3233 PDC262_U66, scr); 3234 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 3235 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 3236 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3237 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3238 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 3239 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3240 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3241 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 3242 atapi = 0; 3243 else 3244 atapi = PDC262_ATAPI_UDMA; 3245 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3246 PDC262_ATAPI(channel), atapi); 3247 } 3248 } 3249 for (drive = 0; drive < 2; drive++) { 3250 drvp = &chp->ch_drive[drive]; 3251 /* If no drive, skip */ 3252 if ((drvp->drive_flags & DRIVE) == 0) 3253 continue; 3254 mode = 0; 3255 if (drvp->drive_flags & DRIVE_UDMA) { 3256 mode = PDC2xx_TIM_SET_MB(mode, 3257 pdc2xx_udma_mb[drvp->UDMA_mode]); 3258 mode = PDC2xx_TIM_SET_MC(mode, 3259 pdc2xx_udma_mc[drvp->UDMA_mode]); 3260 drvp->drive_flags &= ~DRIVE_DMA; 3261 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3262 } else if (drvp->drive_flags & DRIVE_DMA) { 3263 mode = PDC2xx_TIM_SET_MB(mode, 3264 pdc2xx_dma_mb[drvp->DMA_mode]); 3265 mode = PDC2xx_TIM_SET_MC(mode, 3266 pdc2xx_dma_mc[drvp->DMA_mode]); 3267 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3268 } else { 3269 mode = PDC2xx_TIM_SET_MB(mode, 3270 pdc2xx_dma_mb[0]); 
3271 mode = PDC2xx_TIM_SET_MC(mode, 3272 pdc2xx_dma_mc[0]); 3273 } 3274 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 3275 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 3276 if (drvp->drive_flags & DRIVE_ATA) 3277 mode |= PDC2xx_TIM_PRE; 3278 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 3279 if (drvp->PIO_mode >= 3) { 3280 mode |= PDC2xx_TIM_IORDY; 3281 if (drive == 0) 3282 mode |= PDC2xx_TIM_IORDYp; 3283 } 3284 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 3285 "timings 0x%x\n", 3286 sc->sc_wdcdev.sc_dev.dv_xname, 3287 chp->channel, drive, mode), DEBUG_PROBE); 3288 pci_conf_write(sc->sc_pc, sc->sc_tag, 3289 PDC2xx_TIM(chp->channel, drive), mode); 3290 } 3291 if (idedma_ctl != 0) { 3292 /* Add software bits in status register */ 3293 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3294 IDEDMA_CTL, idedma_ctl); 3295 } 3296 pciide_print_modes(cp); 3297 } 3298 3299 int 3300 pdc202xx_pci_intr(arg) 3301 void *arg; 3302 { 3303 struct pciide_softc *sc = arg; 3304 struct pciide_channel *cp; 3305 struct channel_softc *wdc_cp; 3306 int i, rv, crv; 3307 u_int32_t scr; 3308 3309 rv = 0; 3310 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 3311 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3312 cp = &sc->pciide_channels[i]; 3313 wdc_cp = &cp->wdc_channel; 3314 /* If a compat channel skip. */ 3315 if (cp->compat) 3316 continue; 3317 if (scr & PDC2xx_SCR_INT(i)) { 3318 crv = wdcintr(wdc_cp); 3319 if (crv == 0) 3320 printf("%s:%d: bogus intr\n", 3321 sc->sc_wdcdev.sc_dev.dv_xname, i); 3322 else 3323 rv = 1; 3324 } 3325 } 3326 return rv; 3327 } 3328 3329 void 3330 opti_chip_map(sc, pa) 3331 struct pciide_softc *sc; 3332 struct pci_attach_args *pa; 3333 { 3334 struct pciide_channel *cp; 3335 bus_size_t cmdsize, ctlsize; 3336 pcireg_t interface; 3337 u_int8_t init_ctrl; 3338 int channel; 3339 3340 if (pciide_chipen(sc, pa) == 0) 3341 return; 3342 printf("%s: bus-master DMA support present", 3343 sc->sc_wdcdev.sc_dev.dv_xname); 3344 pciide_mapreg_dma(sc, pa); 3345 printf("\n"); 3346 3347 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3348 WDC_CAPABILITY_MODE; 3349 sc->sc_wdcdev.PIO_cap = 4; 3350 if (sc->sc_dma_ok) { 3351 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3352 sc->sc_wdcdev.irqack = pciide_irqack; 3353 sc->sc_wdcdev.DMA_cap = 2; 3354 } 3355 sc->sc_wdcdev.set_modes = opti_setup_channel; 3356 3357 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3358 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3359 3360 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 3361 OPTI_REG_INIT_CONTROL); 3362 3363 interface = PCI_INTERFACE(pa->pa_class); 3364 3365 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3366 cp = &sc->pciide_channels[channel]; 3367 if (pciide_chansetup(sc, channel, interface) == 0) 3368 continue; 3369 if (channel == 1 && 3370 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 3371 printf("%s: %s channel ignored (disabled)\n", 3372 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3373 continue; 3374 } 3375 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3376 pciide_pci_intr); 3377 if (cp->hw_ok == 0) 3378 continue; 3379 pciide_map_compat_intr(pa, cp, channel, interface); 3380 if (cp->hw_ok == 0) 3381 continue; 3382 opti_setup_channel(&cp->wdc_channel); 3383 } 3384 } 3385 3386 void 3387 opti_setup_channel(chp) 3388 struct channel_softc *chp; 3389 { 3390 struct ata_drive_datas *drvp; 3391 struct pciide_channel *cp = (struct pciide_channel*)chp; 3392 struct pciide_softc *sc = (struct 
pciide_softc *)cp->wdc_channel.wdc;
3393 int drive, spd;
3394 int mode[2];
3395 u_int8_t rv, mr;
3396
3397 /*
3398 * The `Delay' and `Address Setup Time' fields of the
3399 * Miscellaneous Register are always zero initially.
3400 */
3401 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3402 mr &= ~(OPTI_MISC_DELAY_MASK |
3403 OPTI_MISC_ADDR_SETUP_MASK |
3404 OPTI_MISC_INDEX_MASK);
3405
3406 /* Prime the control register before setting timing values */
3407 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3408
3409 /* Determine the clock rate of the PCI bus the chip is attached to */
3410 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3411 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3412
3413 /* setup DMA if needed */
3414 pciide_channel_dma_setup(cp);
3415
3416 for (drive = 0; drive < 2; drive++) {
3417 drvp = &chp->ch_drive[drive];
3418 /* If no drive, skip */
3419 if ((drvp->drive_flags & DRIVE) == 0) {
3420 mode[drive] = -1;
3421 continue;
3422 }
3423
3424 if ((drvp->drive_flags & DRIVE_DMA)) {
3425 /*
3426 * Timings will be used for both PIO and DMA,
3427 * so adjust DMA mode if needed
3428 */
3429 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3430 drvp->PIO_mode = drvp->DMA_mode + 2;
3431 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3432 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3433 drvp->PIO_mode - 2 : 0;
3434 if (drvp->DMA_mode == 0)
3435 drvp->PIO_mode = 0;
3436
3437 mode[drive] = drvp->DMA_mode + 5;
3438 } else
3439 mode[drive] = drvp->PIO_mode;
3440
3441 if (drive && mode[0] >= 0 &&
3442 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3443 /*
3444 * Can't have two drives using different values
3445 * for `Address Setup Time'.
3446 * Slow down the faster drive to compensate.
3447 */
3448 int d = (opti_tim_as[spd][mode[0]] >
3449 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3450
3451 mode[d] = mode[1-d];
3452 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3453 chp->ch_drive[d].DMA_mode = 0;
3454 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3455 }
3456 }
3457
3458 for (drive = 0; drive < 2; drive++) {
3459 int m;
3460 if ((m = mode[drive]) < 0)
3461 continue;
3462
3463 /* Set the Address Setup Time and select appropriate index */
3464 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3465 rv |= OPTI_MISC_INDEX(drive);
3466 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3467
3468 /* Set the pulse width and recovery timing parameters */
3469 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3470 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3471 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3472 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3473
3474 /* Set the Enhanced Mode register appropriately */
3475 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3476 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3477 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3478 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3479 }
3480
3481 /* Finally, enable the timings */
3482 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3483
3484 pciide_print_modes(cp);
3485 }
3486
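
/*
 * The sis, acer and opti setup routines above all apply the same rule when
 * a drive is left in multiword DMA: one timing value serves both PIO and
 * DMA transfers, so the two modes are kept within two steps of each other
 * and mode 0 falls back to compatible timings.  The helper below (a name
 * local to this sketch, not referenced by any of those routines) merely
 * restates that shared rule; it is kept under #if 0 so it has no effect on
 * the driver.
 */
#if 0
static void pciide_dma_pio_clamp __P((struct ata_drive_datas *));

static void
pciide_dma_pio_clamp(drvp)
	struct ata_drive_datas *drvp;
{

	if ((drvp->drive_flags & DRIVE_DMA) == 0)
		return;
	/* never let PIO run more than two modes ahead of multiword DMA */
	if (drvp->PIO_mode > (drvp->DMA_mode + 2))
		drvp->PIO_mode = drvp->DMA_mode + 2;
	/* nor program DMA timings faster than the PIO mode allows */
	if (drvp->DMA_mode + 2 > drvp->PIO_mode)
		drvp->DMA_mode = (drvp->PIO_mode > 2) ?
		    drvp->PIO_mode - 2 : 0;
	/* multiword DMA mode 0 implies compatible (PIO 0) timings */
	if (drvp->DMA_mode == 0)
		drvp->PIO_mode = 0;
}
#endif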