/*	$NetBSD: pciide.c,v 1.105 2001/01/12 16:03:59 bouyer Exp $	*/


/*
 * Copyright (c) 1999 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christopher G. Demetriou
 *	for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#ifndef WDCDEBUG
#define WDCDEBUG
#endif

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10
#ifdef WDCDEBUG
int wdcdebug_pciide_mask = 0;
#define WDCDEBUG_PRINT(args, level) \
	if (wdcdebug_pciide_mask & (level)) printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/cy82c693var.h>

#include "opt_pciide.h"

/* inlines for reading/writing 8-bit PCI registers */
static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
    int));
static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
    int, u_int8_t));

static __inline u_int8_t
pciide_pci_read(pc, pa, reg)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
{

	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

static __inline void
pciide_pci_write(pc, pa, reg, val)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
	u_int8_t val;
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}

void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));

void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void piix_setup_channel __P((struct channel_softc*));
void piix3_4_setup_channel __P((struct channel_softc*));
static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));

void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void amd756_setup_channel __P((struct channel_softc*));

void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void apollo_setup_channel __P((struct channel_softc*));

void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_setup_channel __P((struct channel_softc*));
void cmd_channel_map __P((struct pci_attach_args *,
    struct pciide_softc *, int));
int  cmd_pci_intr __P((void *));
void cmd646_9_irqack __P((struct channel_softc *));

void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cy693_setup_channel __P((struct channel_softc*));

void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void sis_setup_channel __P((struct channel_softc*));

void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void acer_setup_channel __P((struct channel_softc*));
int  acer_pci_intr __P((void *));

void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void pdc202xx_setup_channel __P((struct channel_softc*));
int  pdc202xx_pci_intr __P((void *));

void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void opti_setup_channel __P((struct channel_softc*));

void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void hpt_setup_channel __P((struct channel_softc*));
int  hpt_pci_intr __P((void *));

void pciide_channel_dma_setup __P((struct pciide_channel *));
int  pciide_dma_table_setup __P((struct pciide_softc*, int, int));
int  pciide_dma_init __P((void*, int, int, void *, size_t, int));
void pciide_dma_start __P((void*, int, int));
int  pciide_dma_finish __P((void*, int, int, int));
void pciide_irqack __P((struct channel_softc *));
void pciide_print_modes __P((struct pciide_channel *));

struct pciide_product_desc {
	u_int32_t ide_product;
	int ide_flags;
	const char *ide_name;
	/* map and setup chip, probe drives */
	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
#define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARS ignore upper word */

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,
	0,
	"Generic PCI IDE controller",
	default_chip_map,
};

const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_82092AA,
	  0,
	  "Intel 82092AA IDE controller",
	  default_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371FB_IDE,
	  0,
	  "Intel 82371FB IDE controller (PIIX)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371SB_IDE,
	  0,
	  "Intel 82371SB IDE Interface (PIIX3)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371AB_IDE,
	  0,
	  "Intel 82371AB IDE controller (PIIX4)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82440MX_IDE,
	  0,
	  "Intel 82440MX IDE controller",
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801AA_IDE,
	  0,
	  "Intel 82801AA IDE Controller (ICH)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801AB_IDE,
	  0,
	  "Intel 82801AB IDE Controller (ICH0)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801BA_IDE,
	  0,
	  "Intel 82801BA IDE Controller (ICH2)",
	  piix_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,
	  0,
	  "Advanced Micro Devices AMD756 IDE Controller",
	  amd756_chip_map
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  0,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  0,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  0,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
	  0,
	  "VIA Tech VT82C586 IDE Controller",
	  apollo_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
	  0,
	  "VIA Tech VT82C586A IDE Controller",
	  apollo_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693,
	  IDE_16BIT_IOSPACE,
	  "Cypress 82C693 IDE Controller",
	  cy693_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5597_IDE,
	  0,
	  "Silicon Integrated System 5597/5598 IDE controller",
	  sis_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,
	  0,
	  "Acer Labs M5229 UDMA IDE Controller",
	  acer_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_ULTRA33,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA66,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100X,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621,
	  0,
	  "OPTi 82c621 PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82C568,
	  0,
	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82D568,
	  0,
	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Triones/Highpoint HPT366/370 IDE Controller",
	  hpt_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products },
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
	{ PCI_VENDOR_VIATECH, pciide_via_products },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
	{ PCI_VENDOR_SIS, pciide_sis_products },
	{ PCI_VENDOR_ALI, pciide_acer_products },
	{ PCI_VENDOR_PROMISE, pciide_promise_products },
	{ PCI_VENDOR_AMD, pciide_amd_products },
	{ PCI_VENDOR_OPTI, pciide_opti_products },
	{ PCI_VENDOR_TRIONES, pciide_triones_products },
	{ 0, NULL }
};

/* options passed via the 'flags' config keyword */
#define	PCIIDE_OPTIONS_DMA	0x01

int	pciide_match __P((struct device *, struct cfdata *, void *));
void	pciide_attach __P((struct device *, struct device *, void *));

struct cfattach pciide_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};
int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
int	pciide_mapregs_compat __P((struct pci_attach_args *,
	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
int	pciide_mapregs_native __P((struct pci_attach_args *,
	    struct pciide_channel *, bus_size_t *, bus_size_t *,
	    int (*pci_intr) __P((void *))));
void	pciide_mapreg_dma __P((struct pciide_softc *,
	    struct pci_attach_args *));
int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
void	pciide_mapchan __P((struct pci_attach_args *,
	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
	    int (*pci_intr) __P((void *))));
int	pciide_chan_candisable __P((struct pciide_channel *));
void	pciide_map_compat_intr __P((struct pci_attach_args *,
	    struct pciide_channel *, int, int));
int	pciide_print __P((void *, const char *pnp));
int	pciide_compat_intr __P((void *));
int	pciide_pci_intr __P((void *));
const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));

const struct pciide_product_desc *
pciide_lookup_product(id)
	u_int32_t id;
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;

	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if ((pp = vp->ide_products) == NULL)
		return NULL;

	for (; pp->ide_name != NULL; pp++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (pp->ide_name == NULL)
		return NULL;
	return pp;
}

int
pciide_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		return (1);
	}

	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers.  Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
		return (1);
	}

	return (0);
}

void
pciide_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	struct pciide_softc *sc = (struct pciide_softc *)self;
	pcireg_t csr;
	char devinfo[256];
	const char *displaydev;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL) {
		sc->sc_pp = &default_product_desc;
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
		displaydev = devinfo;
	} else
		displaydev = sc->sc_pp->ide_name;

	printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
#ifdef WDCDEBUG
	if (wdcdebug_pciide_mask & DEBUG_PROBE)
		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
	sc->sc_pp->chip_map(sc, pa);

	if (sc->sc_dma_ok) {
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	}
	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}

/* tell whether the chip is enabled or not */
int
pciide_chipen(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t csr;

	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		printf("%s: device disabled (at %s)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
586 "device" : "bridge"); 587 return 0; 588 } 589 return 1; 590 } 591 592 int 593 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep) 594 struct pci_attach_args *pa; 595 struct pciide_channel *cp; 596 int compatchan; 597 bus_size_t *cmdsizep, *ctlsizep; 598 { 599 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 600 struct channel_softc *wdc_cp = &cp->wdc_channel; 601 602 cp->compat = 1; 603 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 604 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 605 606 wdc_cp->cmd_iot = pa->pa_iot; 607 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 608 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) { 609 printf("%s: couldn't map %s channel cmd regs\n", 610 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 611 return (0); 612 } 613 614 wdc_cp->ctl_iot = pa->pa_iot; 615 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 616 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) { 617 printf("%s: couldn't map %s channel ctl regs\n", 618 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 619 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, 620 PCIIDE_COMPAT_CMD_SIZE); 621 return (0); 622 } 623 624 return (1); 625 } 626 627 int 628 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr) 629 struct pci_attach_args * pa; 630 struct pciide_channel *cp; 631 bus_size_t *cmdsizep, *ctlsizep; 632 int (*pci_intr) __P((void *)); 633 { 634 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 635 struct channel_softc *wdc_cp = &cp->wdc_channel; 636 const char *intrstr; 637 pci_intr_handle_t intrhandle; 638 639 cp->compat = 0; 640 641 if (sc->sc_pci_ih == NULL) { 642 if (pci_intr_map(pa, &intrhandle) != 0) { 643 printf("%s: couldn't map native-PCI interrupt\n", 644 sc->sc_wdcdev.sc_dev.dv_xname); 645 return 0; 646 } 647 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 648 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 649 intrhandle, IPL_BIO, pci_intr, sc); 650 if (sc->sc_pci_ih != NULL) { 651 printf("%s: using %s for native-PCI interrupt\n", 652 sc->sc_wdcdev.sc_dev.dv_xname, 653 intrstr ? intrstr : "unknown interrupt"); 654 } else { 655 printf("%s: couldn't establish native-PCI interrupt", 656 sc->sc_wdcdev.sc_dev.dv_xname); 657 if (intrstr != NULL) 658 printf(" at %s", intrstr); 659 printf("\n"); 660 return 0; 661 } 662 } 663 cp->ih = sc->sc_pci_ih; 664 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel), 665 PCI_MAPREG_TYPE_IO, 0, 666 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) { 667 printf("%s: couldn't map %s channel cmd regs\n", 668 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 669 return 0; 670 } 671 672 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel), 673 PCI_MAPREG_TYPE_IO, 0, 674 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) { 675 printf("%s: couldn't map %s channel ctl regs\n", 676 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 677 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 678 return 0; 679 } 680 /* 681 * In native mode, 4 bytes of I/O space are mapped for the control 682 * register, the control register is at offset 2. Pass the generic 683 * code a handle for only one byte at the rigth offset. 
	 */
	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdc_cp->ctl_ioh) != 0) {
		printf("%s: unable to subregion %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
		return 0;
	}
	return (1);
}

void
pciide_mapreg_dma(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done.  If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", but unused (registers at unsafe address "
			    "%#lx)", (unsigned long)addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", but unsupported register maptype (0x%x)", maptype);
	}
}

int
pciide_compat_intr(arg)
	void *arg;
{
	struct pciide_channel *cp = arg;

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p\n",
		    cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}

int
pciide_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel, skip. */
		if (cp->compat)
			continue;
		/* if this channel not waiting for intr, skip */
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

void
pciide_channel_dma_setup(cp)
	struct pciide_channel *cp;
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

int
pciide_dma_table_setup(sc, channel, drive)
	struct pciide_softc *sc;
	int channel, drive;
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return 0;

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
	    (unsigned long)seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		printf("%s:%d: unable to create table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to load table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
	    DEBUG_PROBE);
	/* Create a xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX,
	    IDEDMA_BYTE_COUNT_ALIGN,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		printf("%s:%d: unable to create xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	return 0;
}

int
pciide_dma_init(v, channel, drive, databuf, datalen, flags)
	void *v;
	int channel, drive;
	void *databuf;
	size_t datalen;
	int flags;
{
	struct pciide_softc *sc = v;
	int error, seg;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s:%d: unable to load xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Maps are ready.  Start DMA function */
#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif

	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    (flags & WDC_DMA_READ) ?
	    IDEDMA_CMD_WRITE : 0);
	/* remember flags */
	dma_maps->dma_flags = flags;
	return 0;
}

void
pciide_dma_start(v, channel, drive)
	void *v;
	int channel, drive;
{
	struct pciide_softc *sc = v;

	WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
}

int
pciide_dma_finish(v, channel, drive, force)
	void *v;
	int channel, drive;
	int force;
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	if ((status & IDEDMA_CTL_ERR) != 0) {
		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
		error |= WDC_DMAST_ERR;
	}

	if ((status & IDEDMA_CTL_INTR) == 0) {
		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}

void
pciide_irqack(chp)
	struct channel_softc *chp;
{
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/* clear status bits in IDE DMA registers */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
}

/* some common code used by several chip_map */
int
pciide_chansetup(sc, channel, interface)
	struct pciide_softc *sc;
	int channel;
	pcireg_t interface;
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];

	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.channel = channel;
	cp->wdc_channel.wdc = &sc->sc_wdcdev;
	cp->wdc_channel.ch_queue =
	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		printf("%s: %s channel: "
		    "can't allocate memory for command queue\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}
	printf("%s: %s channel %s to %s mode\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");
	return 1;
}

/* some common code used by several chip channel_map */
void
pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	pcireg_t interface;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr) __P((void *));
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
		    pci_intr);
	else
		cp->hw_ok = pciide_mapregs_compat(pa, cp,
		    wdc_cp->channel, cmdsizep, ctlsizep);

	if (cp->hw_ok == 0)
		return;
	wdc_cp->data32iot = wdc_cp->cmd_iot;
	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
	wdcattach(wdc_cp);
}

/*
 * Generic code to call to know if a channel can be disabled.  Return 1
 * if channel can be disabled, 0 if not
 */
int
pciide_chan_candisable(cp)
	struct pciide_channel *cp;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
		printf("%s: disabling %s channel (no drives)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
		return 1;
	}
	return 0;
}

/*
 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
 * Set hw_ok=0 on failure
 */
void
pciide_map_compat_intr(pa, cp, compatchan, interface)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan, interface;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (cp->hw_ok == 0)
		return;
	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
		return;

	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
	    pa, compatchan, pciide_compat_intr, cp);
	if (cp->ih == NULL) {
		printf("%s: no compatibility interrupt for use by %s "
		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
	}
}

void
pciide_print_modes(cp)
	struct pciide_channel *cp;
{
	wdc_print_modes(&cp->wdc_channel);
}

void
default_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t csr;
	int channel, drive;
	struct ata_drive_datas *drvp;
	u_int8_t idedma_ctl;
	bus_size_t cmdsize, ctlsize;
	char *failreason;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
		printf("%s: bus-master DMA support present",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pp == &default_product_desc &&
		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
		    PCIIDE_OPTIONS_DMA) == 0) {
			printf(", but unused (no driver support)");
			sc->sc_dma_ok = 0;
		} else {
			pciide_mapreg_dma(sc, pa);
			if (sc->sc_dma_ok != 0)
				printf(", used without full driver "
				    "support");
		}
	} else {
		printf("%s: hardware does not support DMA",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		sc->sc_dma_ok = 0;
	}
	printf("\n");
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 0;
	sc->sc_wdcdev.DMA_cap = 0;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, pciide_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp,
			    channel, &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			continue;
		/*
		 * Check to see if something appears to be there.
		 */
		failreason = NULL;
		if (!wdcprobe(&cp->wdc_channel)) {
			failreason = "not responding; disabled or no drives?";
			goto next;
		}
		/*
		 * Now, make sure it's actually attributable to this PCI IDE
		 * channel by trying to access the channel again while the
		 * PCI IDE controller's I/O space is disabled.  (If the
		 * channel no longer appears to be there, it belongs to
		 * this controller.)  YUCK!
		 */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
		    csr & ~PCI_COMMAND_IO_ENABLE);
		if (wdcprobe(&cp->wdc_channel))
			failreason = "other hardware responding at addresses";
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG, csr);
next:
		if (failreason) {
			printf("%s: %s channel ignored (%s)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
			    failreason);
			cp->hw_ok = 0;
			bus_space_unmap(cp->wdc_channel.cmd_iot,
			    cp->wdc_channel.cmd_ioh, cmdsize);
			bus_space_unmap(cp->wdc_channel.ctl_iot,
			    cp->wdc_channel.ctl_ioh, ctlsize);
		} else {
			pciide_map_compat_intr(pa, cp, channel, interface);
		}
		if (cp->hw_ok) {
			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
			wdcattach(&cp->wdc_channel);
		}
	}

	if (sc->sc_dma_ok == 0)
		return;

	/* Allocate DMA maps */
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		idedma_ctl = 0;
		cp = &sc->pciide_channels[channel];
		for (drive = 0; drive < 2; drive++) {
			drvp = &cp->wdc_channel.ch_drive[drive];
			/* If no drive, skip */
			if ((drvp->drive_flags & DRIVE) == 0)
				continue;
			if ((drvp->drive_flags & DRIVE_DMA) == 0)
				continue;
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup */
				printf("%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive);
				drvp->drive_flags &= ~DRIVE_DMA;
			}
			printf("%s:%d:%d: using DMA data transfers\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    channel, drive);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		if (idedma_ctl != 0) {
			/* Add software bits in status register */
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
			    idedma_ctl);
		}
	}
}
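
/*
 * Intel PIIX/PIIX3/PIIX4 and ICH-family support.  The routines below
 * program the PIIX timing registers (PIIX_IDETIM, PIIX_SIDETIM,
 * PIIX_UDMAREG and PIIX_CONFIG, defined in pciide_piix_reg.h) from the
 * PIO/DMA/UDMA modes negotiated for each drive; the ISP/RTC timing
 * fields are computed by piix_setup_idetim_timings() and friends.
 */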

void
piix_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82440MX_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
		case PCI_PRODUCT_INTEL_82801BA_IDE:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		}
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_INTEL_82801AA_IDE:
		sc->sc_wdcdev.UDMA_cap = 4;
		break;
	case PCI_PRODUCT_INTEL_82801BA_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	default:
		sc->sc_wdcdev.UDMA_cap = 2;
	}
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		/* PIIX is compat-only */
		if (pciide_chansetup(sc, channel, 0) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* PIIX are compat-only pciide devices */
		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
		}
		pciide_map_compat_intr(pa, cp, channel, 0);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}

	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}

void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess with the drives' modes: the PIIX can't have
	 * different timings for master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives support DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		drvp[0].DMA_mode = mode[0];
		drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if mode not compatible
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If neither drive is DMA, take the lower mode, unless
	 * one of them is PIO mode < 2
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are set up now */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we get here, neither drive is DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller.  Enable
	 * it per-drive
	 */
	for (drive = 0; drive < 2; drive++) {
		/* If no drive, skip */
		if ((drvp[drive].drive_flags & DRIVE) == 0)
			continue;
		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
		if (drvp[drive].drive_flags & DRIVE_DMA)
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pciide_print_modes(cp);
}

void
piix3_4_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive;
	int channel = chp->channel;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
	    PIIX_SIDETIM_RTC_MASK(channel));

	idedma_ctl = 0;
	/* If channel disabled, no need to go further */
	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
		return;
	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
		    PIIX_UDMATIM_SET(0x3, channel, drive));
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0))
			goto pio;

		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE) {
			ideconf |= PIIX_CONFIG_PINGPONG;
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE) {
			/* setup Ultra/100 */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 4) {
				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
			} else {
				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
				if (drvp->UDMA_mode > 2) {
					ideconf |= PIIX_CONFIG_UDMA66(channel,
					    drive);
				} else {
					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
					    drive);
				}
			}
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
			/* setup Ultra/66 */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 2)
				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
			else
				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
			udmareg |= PIIX_UDMATIM_SET(
			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			if (drive == 0) {
				idetim |= piix_setup_idetim_timings(
				    drvp->DMA_mode, 1, channel);
			} else {
				sidetim |= piix_setup_sidetim_timings(
				    drvp->DMA_mode, 1, channel);
				idetim = PIIX_IDETIM_SET(idetim,
				    PIIX_IDETIM_SITRE, channel);
			}
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* use PIO mode */
		idetim |= piix_setup_idetim_drvs(drvp);
		if (drive == 0) {
			idetim |= piix_setup_idetim_timings(
			    drvp->PIO_mode, 0, channel);
		} else {
			sidetim |= piix_setup_sidetim_timings(
			    drvp->PIO_mode, 0, channel);
			idetim = PIIX_IDETIM_SET(idetim,
			    PIIX_IDETIM_SITRE, channel);
		}
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
	pciide_print_modes(cp);
}


/* setup ISP and RTC fields, based on mode */
static u_int32_t
piix_setup_idetim_timings(mode, dma, channel)
	u_int8_t mode;
	u_int8_t dma;
	u_int8_t channel;
{

	if (dma)
		return PIIX_IDETIM_SET(0,
		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
		    channel);
	else
		return PIIX_IDETIM_SET(0,
		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
		    channel);
}

/* setup DTE, PPE, IE and TIME field based on PIO mode */
static u_int32_t
piix_setup_idetim_drvs(drvp)
	struct ata_drive_datas *drvp;
{
	u_int32_t ret = 0;
	struct channel_softc *chp = drvp->chnl_softc;
	u_int8_t channel = chp->channel;
	u_int8_t drive = drvp->drive;

	/*
	 * If the drive is using UDMA, the timing setup is independent,
	 * so just check DMA and PIO here.
	 */
	if (drvp->drive_flags & DRIVE_DMA) {
		/* if mode = DMA mode 0, use compatible timings */
		if ((drvp->drive_flags & DRIVE_DMA) &&
		    drvp->DMA_mode == 0) {
			drvp->PIO_mode = 0;
			return ret;
		}
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
		/*
		 * If PIO and DMA timings are the same, use fast timings
		 * for PIO too, else use compat timings.
		 */
		if ((piix_isp_pio[drvp->PIO_mode] !=
		    piix_isp_dma[drvp->DMA_mode]) ||
		    (piix_rtc_pio[drvp->PIO_mode] !=
		    piix_rtc_dma[drvp->DMA_mode]))
			drvp->PIO_mode = 0;
		/* if PIO mode <= 2, use compat timings for PIO */
		if (drvp->PIO_mode <= 2) {
			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
			    channel);
			return ret;
		}
	}

	/*
	 * Now setup PIO modes.  If mode < 2, use compat timings.
	 * Else enable fast timings.  Enable IORDY and prefetch/post
	 * if PIO mode >= 3.
	 */

	if (drvp->PIO_mode < 2)
		return ret;

	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
	if (drvp->PIO_mode >= 3) {
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
	}
	return ret;
}

/* setup values in SIDETIM registers, based on mode */
static u_int32_t
piix_setup_sidetim_timings(mode, dma, channel)
	u_int8_t mode;
	u_int8_t dma;
	u_int8_t channel;
{
	if (dma)
		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
	else
		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
}

void
amd756_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;
	pcireg_t chanenable;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 4;
	sc->sc_wdcdev.set_modes = amd756_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);

	WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
	    DEBUG_PROBE);
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);

		if (pciide_chan_candisable(cp))
			chanenable &= ~AMD756_CHAN_EN(channel);
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;

		amd756_setup_channel(&cp->wdc_channel);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
	    chanenable);
	return;
}

void
amd756_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
#ifndef PCIIDE_AMD756_ENABLEDMA
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
#endif

	idedma_ctl = 0;
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
	datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1850 /* setup DMA if needed */ 1851 pciide_channel_dma_setup(cp); 1852 1853 for (drive = 0; drive < 2; drive++) { 1854 drvp = &chp->ch_drive[drive]; 1855 /* If no drive, skip */ 1856 if ((drvp->drive_flags & DRIVE) == 0) 1857 continue; 1858 /* add timing values, setup DMA if needed */ 1859 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1860 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 1861 mode = drvp->PIO_mode; 1862 goto pio; 1863 } 1864 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1865 (drvp->drive_flags & DRIVE_UDMA)) { 1866 /* use Ultra/DMA */ 1867 drvp->drive_flags &= ~DRIVE_DMA; 1868 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 1869 AMD756_UDMA_EN_MTH(chp->channel, drive) | 1870 AMD756_UDMA_TIME(chp->channel, drive, 1871 amd756_udma_tim[drvp->UDMA_mode]); 1872 /* can use PIO timings, MW DMA unused */ 1873 mode = drvp->PIO_mode; 1874 } else { 1875 /* use Multiword DMA, but only if revision is OK */ 1876 drvp->drive_flags &= ~DRIVE_UDMA; 1877 #ifndef PCIIDE_AMD756_ENABLEDMA 1878 /* 1879 * The workaround doesn't seem to be necessary 1880 * with all drives, so it can be disabled by 1881 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 1882 * triggered. 1883 */ 1884 if (AMD756_CHIPREV_DISABLEDMA(rev)) { 1885 printf("%s:%d:%d: multi-word DMA disabled due " 1886 "to chip revision\n", 1887 sc->sc_wdcdev.sc_dev.dv_xname, 1888 chp->channel, drive); 1889 mode = drvp->PIO_mode; 1890 drvp->drive_flags &= ~DRIVE_DMA; 1891 goto pio; 1892 } 1893 #endif 1894 /* mode = min(pio, dma+2) */ 1895 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 1896 mode = drvp->PIO_mode; 1897 else 1898 mode = drvp->DMA_mode + 2; 1899 } 1900 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1901 1902 pio: /* setup PIO mode */ 1903 if (mode <= 2) { 1904 drvp->DMA_mode = 0; 1905 drvp->PIO_mode = 0; 1906 mode = 0; 1907 } else { 1908 drvp->PIO_mode = mode; 1909 drvp->DMA_mode = mode - 2; 1910 } 1911 datatim_reg |= 1912 AMD756_DATATIM_PULSE(chp->channel, drive, 1913 amd756_pio_set[mode]) | 1914 AMD756_DATATIM_RECOV(chp->channel, drive, 1915 amd756_pio_rec[mode]); 1916 } 1917 if (idedma_ctl != 0) { 1918 /* Add software bits in status register */ 1919 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1920 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 1921 idedma_ctl); 1922 } 1923 pciide_print_modes(cp); 1924 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 1925 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 1926 } 1927 1928 void 1929 apollo_chip_map(sc, pa) 1930 struct pciide_softc *sc; 1931 struct pci_attach_args *pa; 1932 { 1933 struct pciide_channel *cp; 1934 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1935 int rev = PCI_REVISION(pa->pa_class); 1936 int channel; 1937 u_int32_t ideconf, udma_conf, old_udma_conf; 1938 bus_size_t cmdsize, ctlsize; 1939 1940 if (pciide_chipen(sc, pa) == 0) 1941 return; 1942 printf("%s: bus-master DMA support present", 1943 sc->sc_wdcdev.sc_dev.dv_xname); 1944 pciide_mapreg_dma(sc, pa); 1945 printf("\n"); 1946 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1947 WDC_CAPABILITY_MODE; 1948 if (sc->sc_dma_ok) { 1949 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1950 sc->sc_wdcdev.irqack = pciide_irqack; 1951 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE 1952 && rev >= 6) 1953 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 1954 } 1955 sc->sc_wdcdev.PIO_cap = 4; 1956 sc->sc_wdcdev.DMA_cap = 2; 1957 sc->sc_wdcdev.UDMA_cap = 2; 1958 sc->sc_wdcdev.set_modes = apollo_setup_channel; 1959 sc->sc_wdcdev.channels = 
sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	old_udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
	    old_udma_conf),
	    DEBUG_PROBE);
	/*
	 * Probe Ultra-DMA capability: temporarily set the UDMA enable and
	 * 66MHz clock bits for drive 0:0 and check which ones read back.
	 */
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA,
	    old_udma_conf | (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
	    APO_UDMA_EN_MTH(0, 0) | APO_UDMA_CLK66(0)));
	udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
	WDCDEBUG_PRINT(("apollo_chip_map: APO_UDMA now 0x%x\n", udma_conf),
	    DEBUG_PROBE);
	if ((udma_conf & (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
	    APO_UDMA_EN_MTH(0, 0))) ==
	    (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
	    APO_UDMA_EN_MTH(0, 0))) {
		if ((udma_conf & APO_UDMA_CLK66(0)) ==
		    APO_UDMA_CLK66(0)) {
			printf("%s: Ultra/66 capable\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			sc->sc_wdcdev.UDMA_cap = 4;
		} else {
			printf("%s: Ultra/33 capable\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			sc->sc_wdcdev.UDMA_cap = 2;
		}
	} else {
		sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_UDMA;
	}
	/* restore the previous UDMA configuration */
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, old_udma_conf);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			ideconf &= ~APO_IDECONF_EN(channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
			    ideconf);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);

		if (cp->hw_ok == 0)
			continue;
		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
	}
	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
}

void
apollo_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	/*
	 * We can't mix Ultra/33 and Ultra/66 on the same channel, so
	 * downgrade to Ultra/33 if needed
	 */
	if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
	    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
		/* both drives UDMA */
		if (chp->ch_drive[0].UDMA_mode > 2 &&
chp->ch_drive[1].UDMA_mode <= 2) { 2056 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 2057 chp->ch_drive[0].UDMA_mode = 2; 2058 } else if (chp->ch_drive[1].UDMA_mode > 2 && 2059 chp->ch_drive[0].UDMA_mode <= 2) { 2060 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 2061 chp->ch_drive[1].UDMA_mode = 2; 2062 } 2063 } 2064 2065 for (drive = 0; drive < 2; drive++) { 2066 drvp = &chp->ch_drive[drive]; 2067 /* If no drive, skip */ 2068 if ((drvp->drive_flags & DRIVE) == 0) 2069 continue; 2070 /* add timing values, setup DMA if needed */ 2071 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2072 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2073 mode = drvp->PIO_mode; 2074 goto pio; 2075 } 2076 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2077 (drvp->drive_flags & DRIVE_UDMA)) { 2078 /* use Ultra/DMA */ 2079 drvp->drive_flags &= ~DRIVE_DMA; 2080 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 2081 APO_UDMA_EN_MTH(chp->channel, drive) | 2082 APO_UDMA_TIME(chp->channel, drive, 2083 apollo_udma_tim[drvp->UDMA_mode]); 2084 if (drvp->UDMA_mode > 2) 2085 udmatim_reg |= 2086 APO_UDMA_CLK66(chp->channel); 2087 /* can use PIO timings, MW DMA unused */ 2088 mode = drvp->PIO_mode; 2089 } else { 2090 /* use Multiword DMA */ 2091 drvp->drive_flags &= ~DRIVE_UDMA; 2092 /* mode = min(pio, dma+2) */ 2093 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2094 mode = drvp->PIO_mode; 2095 else 2096 mode = drvp->DMA_mode + 2; 2097 } 2098 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2099 2100 pio: /* setup PIO mode */ 2101 if (mode <= 2) { 2102 drvp->DMA_mode = 0; 2103 drvp->PIO_mode = 0; 2104 mode = 0; 2105 } else { 2106 drvp->PIO_mode = mode; 2107 drvp->DMA_mode = mode - 2; 2108 } 2109 datatim_reg |= 2110 APO_DATATIM_PULSE(chp->channel, drive, 2111 apollo_pio_set[mode]) | 2112 APO_DATATIM_RECOV(chp->channel, drive, 2113 apollo_pio_rec[mode]); 2114 } 2115 if (idedma_ctl != 0) { 2116 /* Add software bits in status register */ 2117 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2118 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2119 idedma_ctl); 2120 } 2121 pciide_print_modes(cp); 2122 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 2123 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 2124 } 2125 2126 void 2127 cmd_channel_map(pa, sc, channel) 2128 struct pci_attach_args *pa; 2129 struct pciide_softc *sc; 2130 int channel; 2131 { 2132 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2133 bus_size_t cmdsize, ctlsize; 2134 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 2135 int interface; 2136 2137 /* 2138 * The 0648/0649 can be told to identify as a RAID controller. 
2139 * In this case, we have to fake interface 2140 */ 2141 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2142 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2143 PCIIDE_INTERFACE_SETTABLE(1); 2144 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2145 CMD_CONF_DSA1) 2146 interface |= PCIIDE_INTERFACE_PCI(0) | 2147 PCIIDE_INTERFACE_PCI(1); 2148 } else { 2149 interface = PCI_INTERFACE(pa->pa_class); 2150 } 2151 2152 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2153 cp->name = PCIIDE_CHANNEL_NAME(channel); 2154 cp->wdc_channel.channel = channel; 2155 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2156 2157 if (channel > 0) { 2158 cp->wdc_channel.ch_queue = 2159 sc->pciide_channels[0].wdc_channel.ch_queue; 2160 } else { 2161 cp->wdc_channel.ch_queue = 2162 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2163 } 2164 if (cp->wdc_channel.ch_queue == NULL) { 2165 printf("%s %s channel: " 2166 "can't allocate memory for command queue", 2167 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2168 return; 2169 } 2170 2171 printf("%s: %s channel %s to %s mode\n", 2172 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2173 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 2174 "configured" : "wired", 2175 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 2176 "native-PCI" : "compatibility"); 2177 2178 /* 2179 * with a CMD PCI64x, if we get here, the first channel is enabled: 2180 * there's no way to disable the first channel without disabling 2181 * the whole device 2182 */ 2183 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 2184 printf("%s: %s channel ignored (disabled)\n", 2185 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2186 return; 2187 } 2188 2189 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 2190 if (cp->hw_ok == 0) 2191 return; 2192 if (channel == 1) { 2193 if (pciide_chan_candisable(cp)) { 2194 ctrl &= ~CMD_CTRL_2PORT; 2195 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2196 CMD_CTRL, ctrl); 2197 } 2198 } 2199 pciide_map_compat_intr(pa, cp, channel, interface); 2200 } 2201 2202 int 2203 cmd_pci_intr(arg) 2204 void *arg; 2205 { 2206 struct pciide_softc *sc = arg; 2207 struct pciide_channel *cp; 2208 struct channel_softc *wdc_cp; 2209 int i, rv, crv; 2210 u_int32_t priirq, secirq; 2211 2212 rv = 0; 2213 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2214 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2215 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2216 cp = &sc->pciide_channels[i]; 2217 wdc_cp = &cp->wdc_channel; 2218 /* If a compat channel skip. */ 2219 if (cp->compat) 2220 continue; 2221 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 2222 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 2223 crv = wdcintr(wdc_cp); 2224 if (crv == 0) 2225 printf("%s:%d: bogus intr\n", 2226 sc->sc_wdcdev.sc_dev.dv_xname, i); 2227 else 2228 rv = 1; 2229 } 2230 } 2231 return rv; 2232 } 2233 2234 void 2235 cmd_chip_map(sc, pa) 2236 struct pciide_softc *sc; 2237 struct pci_attach_args *pa; 2238 { 2239 int channel; 2240 2241 /* 2242 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2243 * and base adresses registers can be disabled at 2244 * hardware level. In this case, the device is wired 2245 * in compat mode and its first channel is always enabled, 2246 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2247 * In fact, it seems that the first channel of the CMD PCI0640 2248 * can't be disabled. 
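	 * That is why pciide_chipen() is consulted below only when the
	 * kernel is built with the PCIIDE_CMD064x_DISABLE option.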
2249 */ 2250 2251 #ifdef PCIIDE_CMD064x_DISABLE 2252 if (pciide_chipen(sc, pa) == 0) 2253 return; 2254 #endif 2255 2256 printf("%s: hardware does not support DMA\n", 2257 sc->sc_wdcdev.sc_dev.dv_xname); 2258 sc->sc_dma_ok = 0; 2259 2260 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2261 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2262 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 2263 2264 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2265 cmd_channel_map(pa, sc, channel); 2266 } 2267 } 2268 2269 void 2270 cmd0643_9_chip_map(sc, pa) 2271 struct pciide_softc *sc; 2272 struct pci_attach_args *pa; 2273 { 2274 struct pciide_channel *cp; 2275 int channel; 2276 int rev = PCI_REVISION( 2277 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 2278 2279 /* 2280 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2281 * and base adresses registers can be disabled at 2282 * hardware level. In this case, the device is wired 2283 * in compat mode and its first channel is always enabled, 2284 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2285 * In fact, it seems that the first channel of the CMD PCI0640 2286 * can't be disabled. 2287 */ 2288 2289 #ifdef PCIIDE_CMD064x_DISABLE 2290 if (pciide_chipen(sc, pa) == 0) 2291 return; 2292 #endif 2293 printf("%s: bus-master DMA support present", 2294 sc->sc_wdcdev.sc_dev.dv_xname); 2295 pciide_mapreg_dma(sc, pa); 2296 printf("\n"); 2297 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2298 WDC_CAPABILITY_MODE; 2299 if (sc->sc_dma_ok) { 2300 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2301 switch (sc->sc_pp->ide_product) { 2302 case PCI_PRODUCT_CMDTECH_649: 2303 case PCI_PRODUCT_CMDTECH_648: 2304 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2305 sc->sc_wdcdev.UDMA_cap = 4; 2306 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2307 break; 2308 case PCI_PRODUCT_CMDTECH_646: 2309 if (rev >= CMD0646U2_REV) { 2310 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2311 sc->sc_wdcdev.UDMA_cap = 2; 2312 } else if (rev >= CMD0646U_REV) { 2313 /* 2314 * Linux's driver claims that the 646U is broken 2315 * with UDMA. 
Only enable it if we know what we're 2316 * doing 2317 */ 2318 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 2319 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2320 sc->sc_wdcdev.UDMA_cap = 2; 2321 #endif 2322 /* explicitely disable UDMA */ 2323 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2324 CMD_UDMATIM(0), 0); 2325 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2326 CMD_UDMATIM(1), 0); 2327 } 2328 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2329 break; 2330 default: 2331 sc->sc_wdcdev.irqack = pciide_irqack; 2332 } 2333 } 2334 2335 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2336 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2337 sc->sc_wdcdev.PIO_cap = 4; 2338 sc->sc_wdcdev.DMA_cap = 2; 2339 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 2340 2341 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 2342 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2343 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2344 DEBUG_PROBE); 2345 2346 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2347 cp = &sc->pciide_channels[channel]; 2348 cmd_channel_map(pa, sc, channel); 2349 if (cp->hw_ok == 0) 2350 continue; 2351 cmd0643_9_setup_channel(&cp->wdc_channel); 2352 } 2353 /* 2354 * note - this also makes sure we clear the irq disable and reset 2355 * bits 2356 */ 2357 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 2358 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 2359 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2360 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2361 DEBUG_PROBE); 2362 } 2363 2364 void 2365 cmd0643_9_setup_channel(chp) 2366 struct channel_softc *chp; 2367 { 2368 struct ata_drive_datas *drvp; 2369 u_int8_t tim; 2370 u_int32_t idedma_ctl, udma_reg; 2371 int drive; 2372 struct pciide_channel *cp = (struct pciide_channel*)chp; 2373 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2374 2375 idedma_ctl = 0; 2376 /* setup DMA if needed */ 2377 pciide_channel_dma_setup(cp); 2378 2379 for (drive = 0; drive < 2; drive++) { 2380 drvp = &chp->ch_drive[drive]; 2381 /* If no drive, skip */ 2382 if ((drvp->drive_flags & DRIVE) == 0) 2383 continue; 2384 /* add timing values, setup DMA if needed */ 2385 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 2386 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 2387 if (drvp->drive_flags & DRIVE_UDMA) { 2388 /* UltraDMA on a 646U2, 0648 or 0649 */ 2389 drvp->drive_flags &= ~DRIVE_DMA; 2390 udma_reg = pciide_pci_read(sc->sc_pc, 2391 sc->sc_tag, CMD_UDMATIM(chp->channel)); 2392 if (drvp->UDMA_mode > 2 && 2393 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 2394 CMD_BICSR) & 2395 CMD_BICSR_80(chp->channel)) == 0) 2396 drvp->UDMA_mode = 2; 2397 if (drvp->UDMA_mode > 2) 2398 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 2399 else if (sc->sc_wdcdev.UDMA_cap > 2) 2400 udma_reg |= CMD_UDMATIM_UDMA33(drive); 2401 udma_reg |= CMD_UDMATIM_UDMA(drive); 2402 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 2403 CMD_UDMATIM_TIM_OFF(drive)); 2404 udma_reg |= 2405 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 2406 CMD_UDMATIM_TIM_OFF(drive)); 2407 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2408 CMD_UDMATIM(chp->channel), udma_reg); 2409 } else { 2410 /* 2411 * use Multiword DMA. 
2412 * Timings will be used for both PIO and DMA, 2413 * so adjust DMA mode if needed 2414 * if we have a 0646U2/8/9, turn off UDMA 2415 */ 2416 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2417 udma_reg = pciide_pci_read(sc->sc_pc, 2418 sc->sc_tag, 2419 CMD_UDMATIM(chp->channel)); 2420 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 2421 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2422 CMD_UDMATIM(chp->channel), 2423 udma_reg); 2424 } 2425 if (drvp->PIO_mode >= 3 && 2426 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 2427 drvp->DMA_mode = drvp->PIO_mode - 2; 2428 } 2429 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 2430 } 2431 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2432 } 2433 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2434 CMD_DATA_TIM(chp->channel, drive), tim); 2435 } 2436 if (idedma_ctl != 0) { 2437 /* Add software bits in status register */ 2438 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2439 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2440 idedma_ctl); 2441 } 2442 pciide_print_modes(cp); 2443 } 2444 2445 void 2446 cmd646_9_irqack(chp) 2447 struct channel_softc *chp; 2448 { 2449 u_int32_t priirq, secirq; 2450 struct pciide_channel *cp = (struct pciide_channel*)chp; 2451 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2452 2453 if (chp->channel == 0) { 2454 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2455 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 2456 } else { 2457 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2458 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 2459 } 2460 pciide_irqack(chp); 2461 } 2462 2463 void 2464 cy693_chip_map(sc, pa) 2465 struct pciide_softc *sc; 2466 struct pci_attach_args *pa; 2467 { 2468 struct pciide_channel *cp; 2469 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2470 bus_size_t cmdsize, ctlsize; 2471 2472 if (pciide_chipen(sc, pa) == 0) 2473 return; 2474 /* 2475 * this chip has 2 PCI IDE functions, one for primary and one for 2476 * secondary. 
So we need to call pciide_mapregs_compat() with 2477 * the real channel 2478 */ 2479 if (pa->pa_function == 1) { 2480 sc->sc_cy_compatchan = 0; 2481 } else if (pa->pa_function == 2) { 2482 sc->sc_cy_compatchan = 1; 2483 } else { 2484 printf("%s: unexpected PCI function %d\n", 2485 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2486 return; 2487 } 2488 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2489 printf("%s: bus-master DMA support present", 2490 sc->sc_wdcdev.sc_dev.dv_xname); 2491 pciide_mapreg_dma(sc, pa); 2492 } else { 2493 printf("%s: hardware does not support DMA", 2494 sc->sc_wdcdev.sc_dev.dv_xname); 2495 sc->sc_dma_ok = 0; 2496 } 2497 printf("\n"); 2498 2499 sc->sc_cy_handle = cy82c693_init(pa->pa_iot); 2500 if (sc->sc_cy_handle == NULL) { 2501 printf("%s: unable to map hyperCache control registers\n", 2502 sc->sc_wdcdev.sc_dev.dv_xname); 2503 sc->sc_dma_ok = 0; 2504 } 2505 2506 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2507 WDC_CAPABILITY_MODE; 2508 if (sc->sc_dma_ok) { 2509 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2510 sc->sc_wdcdev.irqack = pciide_irqack; 2511 } 2512 sc->sc_wdcdev.PIO_cap = 4; 2513 sc->sc_wdcdev.DMA_cap = 2; 2514 sc->sc_wdcdev.set_modes = cy693_setup_channel; 2515 2516 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2517 sc->sc_wdcdev.nchannels = 1; 2518 2519 /* Only one channel for this chip; if we are here it's enabled */ 2520 cp = &sc->pciide_channels[0]; 2521 sc->wdc_chanarray[0] = &cp->wdc_channel; 2522 cp->name = PCIIDE_CHANNEL_NAME(0); 2523 cp->wdc_channel.channel = 0; 2524 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2525 cp->wdc_channel.ch_queue = 2526 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2527 if (cp->wdc_channel.ch_queue == NULL) { 2528 printf("%s primary channel: " 2529 "can't allocate memory for command queue", 2530 sc->sc_wdcdev.sc_dev.dv_xname); 2531 return; 2532 } 2533 printf("%s: primary channel %s to ", 2534 sc->sc_wdcdev.sc_dev.dv_xname, 2535 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 
2536 "configured" : "wired"); 2537 if (interface & PCIIDE_INTERFACE_PCI(0)) { 2538 printf("native-PCI"); 2539 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 2540 pciide_pci_intr); 2541 } else { 2542 printf("compatibility"); 2543 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, 2544 &cmdsize, &ctlsize); 2545 } 2546 printf(" mode\n"); 2547 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2548 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2549 wdcattach(&cp->wdc_channel); 2550 if (pciide_chan_candisable(cp)) { 2551 pci_conf_write(sc->sc_pc, sc->sc_tag, 2552 PCI_COMMAND_STATUS_REG, 0); 2553 } 2554 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface); 2555 if (cp->hw_ok == 0) 2556 return; 2557 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 2558 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE); 2559 cy693_setup_channel(&cp->wdc_channel); 2560 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 2561 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 2562 } 2563 2564 void 2565 cy693_setup_channel(chp) 2566 struct channel_softc *chp; 2567 { 2568 struct ata_drive_datas *drvp; 2569 int drive; 2570 u_int32_t cy_cmd_ctrl; 2571 u_int32_t idedma_ctl; 2572 struct pciide_channel *cp = (struct pciide_channel*)chp; 2573 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2574 int dma_mode = -1; 2575 2576 cy_cmd_ctrl = idedma_ctl = 0; 2577 2578 /* setup DMA if needed */ 2579 pciide_channel_dma_setup(cp); 2580 2581 for (drive = 0; drive < 2; drive++) { 2582 drvp = &chp->ch_drive[drive]; 2583 /* If no drive, skip */ 2584 if ((drvp->drive_flags & DRIVE) == 0) 2585 continue; 2586 /* add timing values, setup DMA if needed */ 2587 if (drvp->drive_flags & DRIVE_DMA) { 2588 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2589 /* use Multiword DMA */ 2590 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 2591 dma_mode = drvp->DMA_mode; 2592 } 2593 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2594 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 2595 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2596 CY_CMD_CTRL_IOW_REC_OFF(drive)); 2597 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2598 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 2599 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2600 CY_CMD_CTRL_IOR_REC_OFF(drive)); 2601 } 2602 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 2603 chp->ch_drive[0].DMA_mode = dma_mode; 2604 chp->ch_drive[1].DMA_mode = dma_mode; 2605 2606 if (dma_mode == -1) 2607 dma_mode = 0; 2608 2609 if (sc->sc_cy_handle != NULL) { 2610 /* Note: `multiple' is implied. */ 2611 cy82c693_write(sc->sc_cy_handle, 2612 (sc->sc_cy_compatchan == 0) ? 
2613 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 2614 } 2615 2616 pciide_print_modes(cp); 2617 2618 if (idedma_ctl != 0) { 2619 /* Add software bits in status register */ 2620 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2621 IDEDMA_CTL, idedma_ctl); 2622 } 2623 } 2624 2625 void 2626 sis_chip_map(sc, pa) 2627 struct pciide_softc *sc; 2628 struct pci_attach_args *pa; 2629 { 2630 struct pciide_channel *cp; 2631 int channel; 2632 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 2633 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2634 pcireg_t rev = PCI_REVISION(pa->pa_class); 2635 bus_size_t cmdsize, ctlsize; 2636 2637 if (pciide_chipen(sc, pa) == 0) 2638 return; 2639 printf("%s: bus-master DMA support present", 2640 sc->sc_wdcdev.sc_dev.dv_xname); 2641 pciide_mapreg_dma(sc, pa); 2642 printf("\n"); 2643 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2644 WDC_CAPABILITY_MODE; 2645 if (sc->sc_dma_ok) { 2646 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2647 sc->sc_wdcdev.irqack = pciide_irqack; 2648 if (rev > 0xd0) 2649 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2650 } 2651 2652 sc->sc_wdcdev.PIO_cap = 4; 2653 sc->sc_wdcdev.DMA_cap = 2; 2654 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) 2655 sc->sc_wdcdev.UDMA_cap = 2; 2656 sc->sc_wdcdev.set_modes = sis_setup_channel; 2657 2658 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2659 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2660 2661 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 2662 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 2663 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE); 2664 2665 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2666 cp = &sc->pciide_channels[channel]; 2667 if (pciide_chansetup(sc, channel, interface) == 0) 2668 continue; 2669 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 2670 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 2671 printf("%s: %s channel ignored (disabled)\n", 2672 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2673 continue; 2674 } 2675 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2676 pciide_pci_intr); 2677 if (cp->hw_ok == 0) 2678 continue; 2679 if (pciide_chan_candisable(cp)) { 2680 if (channel == 0) 2681 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 2682 else 2683 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 2684 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 2685 sis_ctr0); 2686 } 2687 pciide_map_compat_intr(pa, cp, channel, interface); 2688 if (cp->hw_ok == 0) 2689 continue; 2690 sis_setup_channel(&cp->wdc_channel); 2691 } 2692 } 2693 2694 void 2695 sis_setup_channel(chp) 2696 struct channel_softc *chp; 2697 { 2698 struct ata_drive_datas *drvp; 2699 int drive; 2700 u_int32_t sis_tim; 2701 u_int32_t idedma_ctl; 2702 struct pciide_channel *cp = (struct pciide_channel*)chp; 2703 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2704 2705 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 2706 "channel %d 0x%x\n", chp->channel, 2707 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 2708 DEBUG_PROBE); 2709 sis_tim = 0; 2710 idedma_ctl = 0; 2711 /* setup DMA if needed */ 2712 pciide_channel_dma_setup(cp); 2713 2714 for (drive = 0; drive < 2; drive++) { 2715 drvp = &chp->ch_drive[drive]; 2716 /* If no drive, skip */ 2717 if ((drvp->drive_flags & DRIVE) == 0) 2718 continue; 2719 /* add timing values, setup DMA if needed */ 2720 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 2721 (drvp->drive_flags & DRIVE_UDMA) == 0) 2722 goto pio; 2723 2724 if (drvp->drive_flags & 
DRIVE_UDMA) { 2725 /* use Ultra/DMA */ 2726 drvp->drive_flags &= ~DRIVE_DMA; 2727 sis_tim |= sis_udma_tim[drvp->UDMA_mode] << 2728 SIS_TIM_UDMA_TIME_OFF(drive); 2729 sis_tim |= SIS_TIM_UDMA_EN(drive); 2730 } else { 2731 /* 2732 * use Multiword DMA 2733 * Timings will be used for both PIO and DMA, 2734 * so adjust DMA mode if needed 2735 */ 2736 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 2737 drvp->PIO_mode = drvp->DMA_mode + 2; 2738 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 2739 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 2740 drvp->PIO_mode - 2 : 0; 2741 if (drvp->DMA_mode == 0) 2742 drvp->PIO_mode = 0; 2743 } 2744 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2745 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] << 2746 SIS_TIM_ACT_OFF(drive); 2747 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 2748 SIS_TIM_REC_OFF(drive); 2749 } 2750 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 2751 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 2752 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 2753 if (idedma_ctl != 0) { 2754 /* Add software bits in status register */ 2755 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2756 IDEDMA_CTL, idedma_ctl); 2757 } 2758 pciide_print_modes(cp); 2759 } 2760 2761 void 2762 acer_chip_map(sc, pa) 2763 struct pciide_softc *sc; 2764 struct pci_attach_args *pa; 2765 { 2766 struct pciide_channel *cp; 2767 int channel; 2768 pcireg_t cr, interface; 2769 bus_size_t cmdsize, ctlsize; 2770 2771 if (pciide_chipen(sc, pa) == 0) 2772 return; 2773 printf("%s: bus-master DMA support present", 2774 sc->sc_wdcdev.sc_dev.dv_xname); 2775 pciide_mapreg_dma(sc, pa); 2776 printf("\n"); 2777 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2778 WDC_CAPABILITY_MODE; 2779 if (sc->sc_dma_ok) { 2780 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2781 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2782 sc->sc_wdcdev.irqack = pciide_irqack; 2783 } 2784 2785 sc->sc_wdcdev.PIO_cap = 4; 2786 sc->sc_wdcdev.DMA_cap = 2; 2787 sc->sc_wdcdev.UDMA_cap = 2; 2788 sc->sc_wdcdev.set_modes = acer_setup_channel; 2789 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2790 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2791 2792 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 2793 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 2794 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 2795 2796 /* Enable "microsoft register bits" R/W. 
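	 * The writes just below set ACER_CCAR3_PI and clear the
	 * ACER_CHANSTATUS_RO / PCIIDE_CHAN_RO / ACER_CHANSTATUSREGS_RO
	 * bits; the channel enable bits in PCI_CLASS_REG are then
	 * rewritten further down.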
*/ 2797 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 2798 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 2799 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 2800 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 2801 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 2802 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 2803 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 2804 ~ACER_CHANSTATUSREGS_RO); 2805 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 2806 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 2807 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 2808 /* Don't use cr, re-read the real register content instead */ 2809 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 2810 PCI_CLASS_REG)); 2811 2812 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2813 cp = &sc->pciide_channels[channel]; 2814 if (pciide_chansetup(sc, channel, interface) == 0) 2815 continue; 2816 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 2817 printf("%s: %s channel ignored (disabled)\n", 2818 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2819 continue; 2820 } 2821 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2822 acer_pci_intr); 2823 if (cp->hw_ok == 0) 2824 continue; 2825 if (pciide_chan_candisable(cp)) { 2826 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 2827 pci_conf_write(sc->sc_pc, sc->sc_tag, 2828 PCI_CLASS_REG, cr); 2829 } 2830 pciide_map_compat_intr(pa, cp, channel, interface); 2831 acer_setup_channel(&cp->wdc_channel); 2832 } 2833 } 2834 2835 void 2836 acer_setup_channel(chp) 2837 struct channel_softc *chp; 2838 { 2839 struct ata_drive_datas *drvp; 2840 int drive; 2841 u_int32_t acer_fifo_udma; 2842 u_int32_t idedma_ctl; 2843 struct pciide_channel *cp = (struct pciide_channel*)chp; 2844 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2845 2846 idedma_ctl = 0; 2847 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 2848 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 2849 acer_fifo_udma), DEBUG_PROBE); 2850 /* setup DMA if needed */ 2851 pciide_channel_dma_setup(cp); 2852 2853 for (drive = 0; drive < 2; drive++) { 2854 drvp = &chp->ch_drive[drive]; 2855 /* If no drive, skip */ 2856 if ((drvp->drive_flags & DRIVE) == 0) 2857 continue; 2858 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 2859 "channel %d drive %d 0x%x\n", chp->channel, drive, 2860 pciide_pci_read(sc->sc_pc, sc->sc_tag, 2861 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 2862 /* clear FIFO/DMA mode */ 2863 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 2864 ACER_UDMA_EN(chp->channel, drive) | 2865 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 2866 2867 /* add timing values, setup DMA if needed */ 2868 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 2869 (drvp->drive_flags & DRIVE_UDMA) == 0) { 2870 acer_fifo_udma |= 2871 ACER_FTH_OPL(chp->channel, drive, 0x1); 2872 goto pio; 2873 } 2874 2875 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 2876 if (drvp->drive_flags & DRIVE_UDMA) { 2877 /* use Ultra/DMA */ 2878 drvp->drive_flags &= ~DRIVE_DMA; 2879 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 2880 acer_fifo_udma |= 2881 ACER_UDMA_TIM(chp->channel, drive, 2882 acer_udma[drvp->UDMA_mode]); 2883 } else { 2884 /* 2885 * use Multiword DMA 2886 * Timings will be used for both PIO and DMA, 2887 * so adjust DMA mode if needed 2888 */ 2889 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 2890 drvp->PIO_mode = drvp->DMA_mode + 2; 2891 
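			/*
			 * Mutual clamping: e.g. PIO 4 with MW DMA 1 drops
			 * the PIO mode to 3 above, while PIO 2 with MW DMA 2
			 * drops the DMA mode to 0 below and then forces
			 * PIO mode 0 as well.
			 */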
if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 2892 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 2893 drvp->PIO_mode - 2 : 0; 2894 if (drvp->DMA_mode == 0) 2895 drvp->PIO_mode = 0; 2896 } 2897 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2898 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 2899 ACER_IDETIM(chp->channel, drive), 2900 acer_pio[drvp->PIO_mode]); 2901 } 2902 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 2903 acer_fifo_udma), DEBUG_PROBE); 2904 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 2905 if (idedma_ctl != 0) { 2906 /* Add software bits in status register */ 2907 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2908 IDEDMA_CTL, idedma_ctl); 2909 } 2910 pciide_print_modes(cp); 2911 } 2912 2913 int 2914 acer_pci_intr(arg) 2915 void *arg; 2916 { 2917 struct pciide_softc *sc = arg; 2918 struct pciide_channel *cp; 2919 struct channel_softc *wdc_cp; 2920 int i, rv, crv; 2921 u_int32_t chids; 2922 2923 rv = 0; 2924 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 2925 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2926 cp = &sc->pciide_channels[i]; 2927 wdc_cp = &cp->wdc_channel; 2928 /* If a compat channel skip. */ 2929 if (cp->compat) 2930 continue; 2931 if (chids & ACER_CHIDS_INT(i)) { 2932 crv = wdcintr(wdc_cp); 2933 if (crv == 0) 2934 printf("%s:%d: bogus intr\n", 2935 sc->sc_wdcdev.sc_dev.dv_xname, i); 2936 else 2937 rv = 1; 2938 } 2939 } 2940 return rv; 2941 } 2942 2943 void 2944 hpt_chip_map(sc, pa) 2945 struct pciide_softc *sc; 2946 struct pci_attach_args *pa; 2947 { 2948 struct pciide_channel *cp; 2949 int i, compatchan, revision; 2950 pcireg_t interface; 2951 bus_size_t cmdsize, ctlsize; 2952 2953 if (pciide_chipen(sc, pa) == 0) 2954 return; 2955 revision = PCI_REVISION(pa->pa_class); 2956 2957 /* 2958 * when the chip is in native mode it identifies itself as a 2959 * 'misc mass storage'. Fake interface in this case. 2960 */ 2961 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 2962 interface = PCI_INTERFACE(pa->pa_class); 2963 } else { 2964 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2965 PCIIDE_INTERFACE_PCI(0); 2966 if (revision == HPT370_REV) 2967 interface |= PCIIDE_INTERFACE_PCI(1); 2968 } 2969 2970 printf("%s: bus-master DMA support present", 2971 sc->sc_wdcdev.sc_dev.dv_xname); 2972 pciide_mapreg_dma(sc, pa); 2973 printf("\n"); 2974 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2975 WDC_CAPABILITY_MODE; 2976 if (sc->sc_dma_ok) { 2977 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2978 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2979 sc->sc_wdcdev.irqack = pciide_irqack; 2980 } 2981 sc->sc_wdcdev.PIO_cap = 4; 2982 sc->sc_wdcdev.DMA_cap = 2; 2983 2984 sc->sc_wdcdev.set_modes = hpt_setup_channel; 2985 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2986 if (revision == HPT366_REV) { 2987 sc->sc_wdcdev.UDMA_cap = 4; 2988 /* 2989 * The 366 has 2 PCI IDE functions, one for primary and one 2990 * for secondary. 
So we need to call pciide_mapregs_compat() 2991 * with the real channel 2992 */ 2993 if (pa->pa_function == 0) { 2994 compatchan = 0; 2995 } else if (pa->pa_function == 1) { 2996 compatchan = 1; 2997 } else { 2998 printf("%s: unexpected PCI function %d\n", 2999 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 3000 return; 3001 } 3002 sc->sc_wdcdev.nchannels = 1; 3003 } else { 3004 sc->sc_wdcdev.nchannels = 2; 3005 sc->sc_wdcdev.UDMA_cap = 5; 3006 } 3007 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3008 cp = &sc->pciide_channels[i]; 3009 if (sc->sc_wdcdev.nchannels > 1) { 3010 compatchan = i; 3011 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 3012 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 3013 printf("%s: %s channel ignored (disabled)\n", 3014 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3015 continue; 3016 } 3017 } 3018 if (pciide_chansetup(sc, i, interface) == 0) 3019 continue; 3020 if (interface & PCIIDE_INTERFACE_PCI(i)) { 3021 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 3022 &ctlsize, hpt_pci_intr); 3023 } else { 3024 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 3025 &cmdsize, &ctlsize); 3026 } 3027 if (cp->hw_ok == 0) 3028 return; 3029 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 3030 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 3031 wdcattach(&cp->wdc_channel); 3032 hpt_setup_channel(&cp->wdc_channel); 3033 } 3034 if (revision == HPT370_REV) { 3035 /* 3036 * HPT370_REV has a bit to disable interrupts, make sure 3037 * to clear it 3038 */ 3039 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 3040 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 3041 ~HPT_CSEL_IRQDIS); 3042 } 3043 return; 3044 } 3045 3046 void 3047 hpt_setup_channel(chp) 3048 struct channel_softc *chp; 3049 { 3050 struct ata_drive_datas *drvp; 3051 int drive; 3052 int cable; 3053 u_int32_t before, after; 3054 u_int32_t idedma_ctl; 3055 struct pciide_channel *cp = (struct pciide_channel*)chp; 3056 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3057 3058 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 3059 3060 /* setup DMA if needed */ 3061 pciide_channel_dma_setup(cp); 3062 3063 idedma_ctl = 0; 3064 3065 /* Per drive settings */ 3066 for (drive = 0; drive < 2; drive++) { 3067 drvp = &chp->ch_drive[drive]; 3068 /* If no drive, skip */ 3069 if ((drvp->drive_flags & DRIVE) == 0) 3070 continue; 3071 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 3072 HPT_IDETIM(chp->channel, drive)); 3073 3074 /* add timing values, setup DMA if needed */ 3075 if (drvp->drive_flags & DRIVE_UDMA) { 3076 /* use Ultra/DMA */ 3077 drvp->drive_flags &= ~DRIVE_DMA; 3078 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 3079 drvp->UDMA_mode > 2) 3080 drvp->UDMA_mode = 2; 3081 after = (sc->sc_wdcdev.nchannels == 2) ? 3082 hpt370_udma[drvp->UDMA_mode] : 3083 hpt366_udma[drvp->UDMA_mode]; 3084 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3085 } else if (drvp->drive_flags & DRIVE_DMA) { 3086 /* 3087 * use Multiword DMA. 3088 * Timings will be used for both PIO and DMA, so adjust 3089 * DMA mode if needed 3090 */ 3091 if (drvp->PIO_mode >= 3 && 3092 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3093 drvp->DMA_mode = drvp->PIO_mode - 2; 3094 } 3095 after = (sc->sc_wdcdev.nchannels == 2) ? 3096 hpt370_dma[drvp->DMA_mode] : 3097 hpt366_dma[drvp->DMA_mode]; 3098 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3099 } else { 3100 /* PIO only */ 3101 after = (sc->sc_wdcdev.nchannels == 2) ? 
3102 hpt370_pio[drvp->PIO_mode] : 3103 hpt366_pio[drvp->PIO_mode]; 3104 } 3105 pci_conf_write(sc->sc_pc, sc->sc_tag, 3106 HPT_IDETIM(chp->channel, drive), after); 3107 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 3108 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname, 3109 after, before), DEBUG_PROBE); 3110 } 3111 if (idedma_ctl != 0) { 3112 /* Add software bits in status register */ 3113 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3114 IDEDMA_CTL, idedma_ctl); 3115 } 3116 pciide_print_modes(cp); 3117 } 3118 3119 int 3120 hpt_pci_intr(arg) 3121 void *arg; 3122 { 3123 struct pciide_softc *sc = arg; 3124 struct pciide_channel *cp; 3125 struct channel_softc *wdc_cp; 3126 int rv = 0; 3127 int dmastat, i, crv; 3128 3129 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3130 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3131 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 3132 if((dmastat & IDEDMA_CTL_INTR) == 0) 3133 continue; 3134 cp = &sc->pciide_channels[i]; 3135 wdc_cp = &cp->wdc_channel; 3136 crv = wdcintr(wdc_cp); 3137 if (crv == 0) { 3138 printf("%s:%d: bogus intr\n", 3139 sc->sc_wdcdev.sc_dev.dv_xname, i); 3140 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3141 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 3142 } else 3143 rv = 1; 3144 } 3145 return rv; 3146 } 3147 3148 3149 /* A macro to test product */ 3150 #define PDC_IS_262(sc) \ 3151 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \ 3152 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3153 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) 3154 3155 void 3156 pdc202xx_chip_map(sc, pa) 3157 struct pciide_softc *sc; 3158 struct pci_attach_args *pa; 3159 { 3160 struct pciide_channel *cp; 3161 int channel; 3162 pcireg_t interface, st, mode; 3163 bus_size_t cmdsize, ctlsize; 3164 3165 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3166 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st), 3167 DEBUG_PROBE); 3168 if (pciide_chipen(sc, pa) == 0) 3169 return; 3170 3171 /* turn off RAID mode */ 3172 st &= ~PDC2xx_STATE_IDERAID; 3173 3174 /* 3175 * can't rely on the PCI_CLASS_REG content if the chip was in raid 3176 * mode. 
We have to fake interface 3177 */ 3178 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 3179 if (st & PDC2xx_STATE_NATIVE) 3180 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3181 3182 printf("%s: bus-master DMA support present", 3183 sc->sc_wdcdev.sc_dev.dv_xname); 3184 pciide_mapreg_dma(sc, pa); 3185 printf("\n"); 3186 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3187 WDC_CAPABILITY_MODE; 3188 if (sc->sc_dma_ok) { 3189 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3190 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3191 sc->sc_wdcdev.irqack = pciide_irqack; 3192 } 3193 sc->sc_wdcdev.PIO_cap = 4; 3194 sc->sc_wdcdev.DMA_cap = 2; 3195 if (PDC_IS_262(sc)) 3196 sc->sc_wdcdev.UDMA_cap = 4; 3197 else 3198 sc->sc_wdcdev.UDMA_cap = 2; 3199 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel; 3200 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3201 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3202 3203 /* setup failsafe defaults */ 3204 mode = 0; 3205 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 3206 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 3207 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 3208 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 3209 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3210 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 " 3211 "initial timings 0x%x, now 0x%x\n", channel, 3212 pci_conf_read(sc->sc_pc, sc->sc_tag, 3213 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 3214 DEBUG_PROBE); 3215 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0), 3216 mode | PDC2xx_TIM_IORDYp); 3217 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 " 3218 "initial timings 0x%x, now 0x%x\n", channel, 3219 pci_conf_read(sc->sc_pc, sc->sc_tag, 3220 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 3221 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1), 3222 mode); 3223 } 3224 3225 mode = PDC2xx_SCR_DMA; 3226 if (PDC_IS_262(sc)) { 3227 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 3228 } else { 3229 /* the BIOS set it up this way */ 3230 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 3231 } 3232 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 3233 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 3234 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n", 3235 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode), 3236 DEBUG_PROBE); 3237 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode); 3238 3239 /* controller initial state register is OK even without BIOS */ 3240 /* Set DMA mode to IDE DMA compatibility */ 3241 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 3242 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ), 3243 DEBUG_PROBE); 3244 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 3245 mode | 0x1); 3246 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 3247 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 3248 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 3249 mode | 0x1); 3250 3251 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3252 cp = &sc->pciide_channels[channel]; 3253 if (pciide_chansetup(sc, channel, interface) == 0) 3254 continue; 3255 if ((st & (PDC_IS_262(sc) ? 
3256 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 3257 printf("%s: %s channel ignored (disabled)\n", 3258 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3259 continue; 3260 } 3261 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3262 pdc202xx_pci_intr); 3263 if (cp->hw_ok == 0) 3264 continue; 3265 if (pciide_chan_candisable(cp)) 3266 st &= ~(PDC_IS_262(sc) ? 3267 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 3268 pciide_map_compat_intr(pa, cp, channel, interface); 3269 pdc202xx_setup_channel(&cp->wdc_channel); 3270 } 3271 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st), 3272 DEBUG_PROBE); 3273 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 3274 return; 3275 } 3276 3277 void 3278 pdc202xx_setup_channel(chp) 3279 struct channel_softc *chp; 3280 { 3281 struct ata_drive_datas *drvp; 3282 int drive; 3283 pcireg_t mode, st; 3284 u_int32_t idedma_ctl, scr, atapi; 3285 struct pciide_channel *cp = (struct pciide_channel*)chp; 3286 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3287 int channel = chp->channel; 3288 3289 /* setup DMA if needed */ 3290 pciide_channel_dma_setup(cp); 3291 3292 idedma_ctl = 0; 3293 3294 /* Per channel settings */ 3295 if (PDC_IS_262(sc)) { 3296 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3297 PDC262_U66); 3298 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3299 /* Trimm UDMA mode */ 3300 if ((st & PDC262_STATE_80P(channel)) != 0 || 3301 (chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3302 chp->ch_drive[0].UDMA_mode <= 2) || 3303 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3304 chp->ch_drive[1].UDMA_mode <= 2)) { 3305 if (chp->ch_drive[0].UDMA_mode > 2) 3306 chp->ch_drive[0].UDMA_mode = 2; 3307 if (chp->ch_drive[1].UDMA_mode > 2) 3308 chp->ch_drive[1].UDMA_mode = 2; 3309 } 3310 /* Set U66 if needed */ 3311 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3312 chp->ch_drive[0].UDMA_mode > 2) || 3313 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3314 chp->ch_drive[1].UDMA_mode > 2)) 3315 scr |= PDC262_U66_EN(channel); 3316 else 3317 scr &= ~PDC262_U66_EN(channel); 3318 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3319 PDC262_U66, scr); 3320 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 3321 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 3322 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3323 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3324 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 3325 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3326 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3327 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 3328 atapi = 0; 3329 else 3330 atapi = PDC262_ATAPI_UDMA; 3331 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3332 PDC262_ATAPI(channel), atapi); 3333 } 3334 } 3335 for (drive = 0; drive < 2; drive++) { 3336 drvp = &chp->ch_drive[drive]; 3337 /* If no drive, skip */ 3338 if ((drvp->drive_flags & DRIVE) == 0) 3339 continue; 3340 mode = 0; 3341 if (drvp->drive_flags & DRIVE_UDMA) { 3342 /* use Ultra/DMA */ 3343 drvp->drive_flags &= ~DRIVE_DMA; 3344 mode = PDC2xx_TIM_SET_MB(mode, 3345 pdc2xx_udma_mb[drvp->UDMA_mode]); 3346 mode = PDC2xx_TIM_SET_MC(mode, 3347 pdc2xx_udma_mc[drvp->UDMA_mode]); 3348 drvp->drive_flags &= ~DRIVE_DMA; 3349 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3350 } else if (drvp->drive_flags & DRIVE_DMA) { 3351 mode = PDC2xx_TIM_SET_MB(mode, 3352 pdc2xx_dma_mb[drvp->DMA_mode]); 3353 mode = PDC2xx_TIM_SET_MC(mode, 3354 pdc2xx_dma_mc[drvp->DMA_mode]); 3355 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3356 } else { 
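			/*
			 * Neither UDMA nor multiword DMA is in use for this
			 * drive; fill the MB/MC fields with the same mode-0
			 * values used for the failsafe defaults above, while
			 * the PA/PB fields below carry the real PIO timing.
			 */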
3357 mode = PDC2xx_TIM_SET_MB(mode, 3358 pdc2xx_dma_mb[0]); 3359 mode = PDC2xx_TIM_SET_MC(mode, 3360 pdc2xx_dma_mc[0]); 3361 } 3362 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 3363 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 3364 if (drvp->drive_flags & DRIVE_ATA) 3365 mode |= PDC2xx_TIM_PRE; 3366 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 3367 if (drvp->PIO_mode >= 3) { 3368 mode |= PDC2xx_TIM_IORDY; 3369 if (drive == 0) 3370 mode |= PDC2xx_TIM_IORDYp; 3371 } 3372 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 3373 "timings 0x%x\n", 3374 sc->sc_wdcdev.sc_dev.dv_xname, 3375 chp->channel, drive, mode), DEBUG_PROBE); 3376 pci_conf_write(sc->sc_pc, sc->sc_tag, 3377 PDC2xx_TIM(chp->channel, drive), mode); 3378 } 3379 if (idedma_ctl != 0) { 3380 /* Add software bits in status register */ 3381 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3382 IDEDMA_CTL, idedma_ctl); 3383 } 3384 pciide_print_modes(cp); 3385 } 3386 3387 int 3388 pdc202xx_pci_intr(arg) 3389 void *arg; 3390 { 3391 struct pciide_softc *sc = arg; 3392 struct pciide_channel *cp; 3393 struct channel_softc *wdc_cp; 3394 int i, rv, crv; 3395 u_int32_t scr; 3396 3397 rv = 0; 3398 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 3399 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3400 cp = &sc->pciide_channels[i]; 3401 wdc_cp = &cp->wdc_channel; 3402 /* If a compat channel skip. */ 3403 if (cp->compat) 3404 continue; 3405 if (scr & PDC2xx_SCR_INT(i)) { 3406 crv = wdcintr(wdc_cp); 3407 if (crv == 0) 3408 printf("%s:%d: bogus intr\n", 3409 sc->sc_wdcdev.sc_dev.dv_xname, i); 3410 else 3411 rv = 1; 3412 } 3413 } 3414 return rv; 3415 } 3416 3417 void 3418 opti_chip_map(sc, pa) 3419 struct pciide_softc *sc; 3420 struct pci_attach_args *pa; 3421 { 3422 struct pciide_channel *cp; 3423 bus_size_t cmdsize, ctlsize; 3424 pcireg_t interface; 3425 u_int8_t init_ctrl; 3426 int channel; 3427 3428 if (pciide_chipen(sc, pa) == 0) 3429 return; 3430 printf("%s: bus-master DMA support present", 3431 sc->sc_wdcdev.sc_dev.dv_xname); 3432 pciide_mapreg_dma(sc, pa); 3433 printf("\n"); 3434 3435 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3436 WDC_CAPABILITY_MODE; 3437 sc->sc_wdcdev.PIO_cap = 4; 3438 if (sc->sc_dma_ok) { 3439 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3440 sc->sc_wdcdev.irqack = pciide_irqack; 3441 sc->sc_wdcdev.DMA_cap = 2; 3442 } 3443 sc->sc_wdcdev.set_modes = opti_setup_channel; 3444 3445 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3446 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3447 3448 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 3449 OPTI_REG_INIT_CONTROL); 3450 3451 interface = PCI_INTERFACE(pa->pa_class); 3452 3453 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3454 cp = &sc->pciide_channels[channel]; 3455 if (pciide_chansetup(sc, channel, interface) == 0) 3456 continue; 3457 if (channel == 1 && 3458 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 3459 printf("%s: %s channel ignored (disabled)\n", 3460 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3461 continue; 3462 } 3463 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3464 pciide_pci_intr); 3465 if (cp->hw_ok == 0) 3466 continue; 3467 pciide_map_compat_intr(pa, cp, channel, interface); 3468 if (cp->hw_ok == 0) 3469 continue; 3470 opti_setup_channel(&cp->wdc_channel); 3471 } 3472 } 3473 3474 void 3475 opti_setup_channel(chp) 3476 struct channel_softc *chp; 3477 { 3478 struct ata_drive_datas *drvp; 3479 struct pciide_channel *cp = (struct 
pciide_channel*)chp; 3480 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3481 int drive, spd; 3482 int mode[2]; 3483 u_int8_t rv, mr; 3484 3485 /* 3486 * The `Delay' and `Address Setup Time' fields of the 3487 * Miscellaneous Register are always zero initially. 3488 */ 3489 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 3490 mr &= ~(OPTI_MISC_DELAY_MASK | 3491 OPTI_MISC_ADDR_SETUP_MASK | 3492 OPTI_MISC_INDEX_MASK); 3493 3494 /* Prime the control register before setting timing values */ 3495 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 3496 3497 /* Determine the clockrate of the PCIbus the chip is attached to */ 3498 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 3499 spd &= OPTI_STRAP_PCI_SPEED_MASK; 3500 3501 /* setup DMA if needed */ 3502 pciide_channel_dma_setup(cp); 3503 3504 for (drive = 0; drive < 2; drive++) { 3505 drvp = &chp->ch_drive[drive]; 3506 /* If no drive, skip */ 3507 if ((drvp->drive_flags & DRIVE) == 0) { 3508 mode[drive] = -1; 3509 continue; 3510 } 3511 3512 if ((drvp->drive_flags & DRIVE_DMA)) { 3513 /* 3514 * Timings will be used for both PIO and DMA, 3515 * so adjust DMA mode if needed 3516 */ 3517 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3518 drvp->PIO_mode = drvp->DMA_mode + 2; 3519 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3520 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 3521 drvp->PIO_mode - 2 : 0; 3522 if (drvp->DMA_mode == 0) 3523 drvp->PIO_mode = 0; 3524 3525 mode[drive] = drvp->DMA_mode + 5; 3526 } else 3527 mode[drive] = drvp->PIO_mode; 3528 3529 if (drive && mode[0] >= 0 && 3530 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 3531 /* 3532 * Can't have two drives using different values 3533 * for `Address Setup Time'. 3534 * Slow down the faster drive to compensate. 3535 */ 3536 int d = (opti_tim_as[spd][mode[0]] > 3537 opti_tim_as[spd][mode[1]]) ? 0 : 1; 3538 3539 mode[d] = mode[1-d]; 3540 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 3541 chp->ch_drive[d].DMA_mode = 0; 3542 chp->ch_drive[d].drive_flags &= DRIVE_DMA; 3543 } 3544 } 3545 3546 for (drive = 0; drive < 2; drive++) { 3547 int m; 3548 if ((m = mode[drive]) < 0) 3549 continue; 3550 3551 /* Set the Address Setup Time and select appropriate index */ 3552 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 3553 rv |= OPTI_MISC_INDEX(drive); 3554 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 3555 3556 /* Set the pulse width and recovery timing parameters */ 3557 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 3558 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 3559 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 3560 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 3561 3562 /* Set the Enhanced Mode register appropriately */ 3563 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 3564 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 3565 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 3566 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 3567 } 3568 3569 /* Finally, enable the timings */ 3570 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 3571 3572 pciide_print_modes(cp); 3573 } 3574