/*	$OpenBSD: pciide.c,v 1.149 2003/11/07 10:16:45 jmc Exp $	*/
/*	$NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Manuel Bouyer.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Christopher G. Demetriou
 *    for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10

#ifdef WDCDEBUG
#ifndef WDCDEBUG_PCIIDE_MASK
#define WDCDEBUG_PCIIDE_MASK 0x00
#endif
int wdcdebug_pciide_mask = WDCDEBUG_PCIIDE_MASK;
#define WDCDEBUG_PRINT(args, level) do {		\
	if ((wdcdebug_pciide_mask & (level)) != 0)	\
		printf args;				\
} while (0)
#else
#define WDCDEBUG_PRINT(args, level)
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_sii3112_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/pciide_acard_reg.h>
#include <dev/pci/pciide_natsemi_reg.h>
#include <dev/pci/pciide_nforce_reg.h>
#include <dev/pci/pciide_i31244_reg.h>
#include <dev/pci/cy82c693var.h>

#include <dev/ata/atavar.h>
#include <dev/ic/wdcreg.h>
#include <dev/ic/wdcvar.h>

/* inlines for reading/writing 8-bit PCI registers */
static __inline u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t,
    int);
static __inline void pciide_pci_write(pci_chipset_tag_t, pcitag_t,
    int, u_int8_t);

static __inline u_int8_t
pciide_pci_read(pc, pa, reg)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
{

	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

static __inline void
pciide_pci_write(pc, pa, reg, val)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
	u_int8_t val;
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}
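/*
 * The two helpers above synthesize byte-wide configuration accesses from
 * the 32-bit accesses PCI provides: reading register 0x43, for instance,
 * fetches the dword at 0x40 and shifts it right by 24 bits, and a byte
 * write does a read-modify-write of that dword so the three neighbouring
 * bytes are preserved.
 */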
struct pciide_softc {
	struct wdc_softc sc_wdcdev;	/* common wdc definitions */
	pci_chipset_tag_t sc_pc;	/* PCI registers info */
	pcitag_t sc_tag;
	void *sc_pci_ih;		/* PCI interrupt handle */
	int sc_dma_ok;			/* bus-master DMA info */
	bus_space_tag_t sc_dma_iot;
	bus_space_handle_t sc_dma_ioh;
	bus_dma_tag_t sc_dmat;

	/*
	 * Some controllers might have DMA restrictions other than
	 * the norm.
	 */
	bus_size_t sc_dma_maxsegsz;
	bus_size_t sc_dma_boundary;

	/* For Cypress */
	const struct cy82c693_handle *sc_cy_handle;
	int sc_cy_compatchan;

	/* For SiS */
	u_int8_t sis_type;

	/* Chip description */
	const struct pciide_product_desc *sc_pp;
	/* Chip revision */
	int sc_rev;
	/* common definitions */
	struct channel_softc *wdc_chanarray[PCIIDE_NUM_CHANNELS];
	/* internal bookkeeping */
	struct pciide_channel {			/* per-channel data */
		struct channel_softc wdc_channel; /* generic part */
		char *name;
		int hw_ok;			/* hardware mapped & OK? */
		int compat;			/* is it compat? */
		int dma_in_progress;
		void *ih;			/* compat or pci handle */
		bus_space_handle_t ctl_baseioh;	/* ctrl regs blk, native mode */
		/* DMA tables and DMA map for xfer, for each drive */
		struct pciide_dma_maps {
			bus_dmamap_t dmamap_table;
			struct idedma_table *dma_table;
			bus_dmamap_t dmamap_xfer;
			int dma_flags;
		} dma_maps[2];
	} pciide_channels[PCIIDE_NUM_CHANNELS];
};

void default_chip_map(struct pciide_softc*, struct pci_attach_args*);

void sata_setup_channel(struct channel_softc *);

void piix_chip_map(struct pciide_softc*, struct pci_attach_args*);
void piix_setup_channel(struct channel_softc*);
void piix3_4_setup_channel(struct channel_softc*);

static u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t);
static u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas*);
static u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t);

void amd756_chip_map(struct pciide_softc*, struct pci_attach_args*);
void amd756_setup_channel(struct channel_softc*);

void apollo_chip_map(struct pciide_softc*, struct pci_attach_args*);
void apollo_sata_chip_map(struct pciide_softc*, struct pci_attach_args*);
void apollo_setup_channel(struct channel_softc*);

void cmd_chip_map(struct pciide_softc*, struct pci_attach_args*);
void cmd0643_9_chip_map(struct pciide_softc*, struct pci_attach_args*);
void cmd0643_9_setup_channel(struct channel_softc*);
void cmd680_chip_map(struct pciide_softc*, struct pci_attach_args*);
void cmd680_setup_channel(struct channel_softc*);
void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int);
void cmd_channel_map(struct pci_attach_args *,
    struct pciide_softc *, int);
int cmd_pci_intr(void *);
void cmd646_9_irqack(struct channel_softc *);

void sii3112_chip_map(struct pciide_softc*, struct pci_attach_args*);
void sii3112_setup_channel(struct channel_softc*);

void cy693_chip_map(struct pciide_softc*, struct pci_attach_args*);
void cy693_setup_channel(struct channel_softc*);

void sis_chip_map(struct pciide_softc*, struct pci_attach_args*);
void sis_setup_channel(struct channel_softc*);
void sis96x_setup_channel(struct channel_softc *);
int sis_hostbr_match(struct pci_attach_args *);
int sis_south_match(struct pci_attach_args *);

void natsemi_chip_map(struct pciide_softc*, struct pci_attach_args*);
void natsemi_setup_channel(struct channel_softc*);
int natsemi_pci_intr(void *);
void natsemi_irqack(struct channel_softc *);

void acer_chip_map(struct pciide_softc*, struct pci_attach_args*);
void acer_setup_channel(struct channel_softc*);
int acer_pci_intr(void *);

void pdc202xx_chip_map(struct pciide_softc*, struct pci_attach_args*);
void pdc202xx_setup_channel(struct channel_softc*);
void pdc20268_setup_channel(struct channel_softc*);
int pdc202xx_pci_intr(void *);
int pdc20265_pci_intr(void *);
void pdc20262_dma_start(void *, int, int);
int pdc20262_dma_finish(void *, int, int, int);

void opti_chip_map(struct pciide_softc*, struct pci_attach_args*);
void opti_setup_channel(struct channel_softc*);

void hpt_chip_map(struct pciide_softc*, struct pci_attach_args*);
void hpt_setup_channel(struct channel_softc*);
int hpt_pci_intr(void *);

void acard_chip_map(struct pciide_softc*, struct pci_attach_args*);
void acard_setup_channel(struct channel_softc*);
int acard_pci_intr(void *);

void serverworks_chip_map(struct pciide_softc*, struct pci_attach_args*);
void serverworks_setup_channel(struct channel_softc*);
int serverworks_pci_intr(void *);

void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *);
void nforce_setup_channel(struct channel_softc *);
int nforce_pci_intr(void *);

void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *);

void pciide_channel_dma_setup(struct pciide_channel *);
int pciide_dma_table_setup(struct pciide_softc*, int, int);
int pciide_dma_init(void *, int, int, void *, size_t, int);
void pciide_dma_start(void *, int, int);
int pciide_dma_finish(void *, int, int, int);
void pciide_irqack(struct channel_softc *);
void pciide_print_modes(struct pciide_channel *);
void pciide_print_channels(int, pcireg_t);

struct pciide_product_desc {
	u_int32_t ide_product;
	u_short ide_flags;
	/* map and setup chip, probe drives */
	void (*chip_map)(struct pciide_softc*, struct pci_attach_args*);
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
#define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARS ignore upper word */

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,	/* Generic PCI IDE controller */
	0,
	default_chip_map
};

const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_82092AA,	/* Intel 82092AA IDE */
	  0,
	  default_chip_map
	},
	{ PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801CAM_IDE, /* Intel 82801CAM IDE (ICH3) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801CA_IDE, /* Intel 82801CA IDE (ICH3) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801DB_IDE, /* Intel 82801DB IDE (ICH4) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801DBM_IDE, /* Intel 82801DBM IDE (ICH4-M) */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801EB_IDE, /* Intel 82801EB/ER (ICH5/5R) IDE */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801EB_SATA, /* Intel 82801EB (ICH5) SATA */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801ER_SATA, /* Intel 82801ER (ICH5R) SATA */
	  0,
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_31244,	/* Intel 31244 SATA */
	  0,
	  artisea_chip_map
	}
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,	/* AMD 756 */
	  0,
	  amd756_chip_map
	},
	{ PCI_PRODUCT_AMD_766_IDE,	/* AMD 766 */
	  0,
	  amd756_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC768_IDE,
	  0,
	  amd756_chip_map
	},
	{ PCI_PRODUCT_AMD_8111_IDE,
	  0,
	  amd756_chip_map
	}
};

#ifdef notyet
const struct pciide_product_desc pciide_opti_products[] = {

	{ PCI_PRODUCT_OPTI_82C621,
	  0,
	  opti_chip_map
	},
	{ PCI_PRODUCT_OPTI_82C568,
	  0,
	  opti_chip_map
	},
	{ PCI_PRODUCT_OPTI_82D568,
	  0,
	  opti_chip_map
	},
};
#endif

const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,	/* CMD Technology PCI0640 */
	  0,
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,	/* CMD Technology PCI0643 */
	  0,
	  cmd0643_9_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_646,	/* CMD Technology PCI0646 */
	  0,
	  cmd0643_9_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_648,	/* CMD Technology PCI0648 */
	  IDE_PCI_CLASS_OVERRIDE,
	  cmd0643_9_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_649,	/* CMD Technology PCI0649 */
	  IDE_PCI_CLASS_OVERRIDE,
	  cmd0643_9_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_680,	/* CMD Technology PCI0680 */
	  IDE_PCI_CLASS_OVERRIDE,
	  cmd680_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_3112,	/* SiI 3112 SATA */
	  IDE_PCI_CLASS_OVERRIDE,	/* XXX: subclass RAID */
	  sii3112_chip_map
	}
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C416, /* VIA VT82C416 IDE */
	  0,
	  apollo_chip_map
	},
	{ PCI_PRODUCT_VIATECH_VT82C571, /* VIA VT82C571 IDE */
	  0,
	  apollo_chip_map
	},
	{ PCI_PRODUCT_VIATECH_VT8237_SATA, /* VIA VT8237 SATA */
	  IDE_PCI_CLASS_OVERRIDE,
	  apollo_sata_chip_map
	}
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693,	/* Contaq CY82C693 IDE */
	  IDE_16BIT_IOSPACE,
	  cy693_chip_map
	}
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5513,		/* SIS 5513 EIDE */
	  0,
	  sis_chip_map
	}
};

const struct pciide_product_desc pciide_natsemi_products[] = {
	{ PCI_PRODUCT_NS_PC87415,	/* National Semi PC87415 IDE */
	  0,
	  natsemi_chip_map
	}
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,	/* Acer Labs M5229 UDMA IDE */
	  0,
	  acer_chip_map
	}
};

const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366,	/* Highpoint HPT36x/37x IDE */
	  IDE_PCI_CLASS_OVERRIDE,
	  hpt_chip_map,
	},
	{ PCI_PRODUCT_TRIONES_HPT372A,	/* Highpoint HPT372A IDE */
	  IDE_PCI_CLASS_OVERRIDE,
	  hpt_chip_map
	},
	{ PCI_PRODUCT_TRIONES_HPT302,	/* Highpoint HPT302 IDE */
	  IDE_PCI_CLASS_OVERRIDE,
	  hpt_chip_map
	},
	{ PCI_PRODUCT_TRIONES_HPT371,	/* Highpoint HPT371 IDE */
	  IDE_PCI_CLASS_OVERRIDE,
	  hpt_chip_map
	},
	{ PCI_PRODUCT_TRIONES_HPT374,	/* Highpoint HPT374 IDE */
	  IDE_PCI_CLASS_OVERRIDE,
	  hpt_chip_map
	}
};

const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_PDC20246,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20262,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20265,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20267,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20268,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20268R,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20269,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20271,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20275,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20276,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20277,
	  IDE_PCI_CLASS_OVERRIDE,
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_PDC20376,	/* PDC20376 SATA */
	  IDE_PCI_CLASS_OVERRIDE,	/* XXX: subclass RAID */
	  pdc202xx_chip_map,
	}
};

const struct pciide_product_desc pciide_acard_products[] = {
	{ PCI_PRODUCT_ACARD_ATP850U,	/* Acard ATP850U Ultra33 Controller */
	  IDE_PCI_CLASS_OVERRIDE,
	  acard_chip_map,
	},
	{ PCI_PRODUCT_ACARD_ATP860,	/* Acard ATP860 Ultra66 Controller */
	  IDE_PCI_CLASS_OVERRIDE,
	  acard_chip_map,
	},
	{ PCI_PRODUCT_ACARD_ATP860A,	/* Acard ATP860-A Ultra66 Controller */
	  IDE_PCI_CLASS_OVERRIDE,
	  acard_chip_map,
	}
};

const struct pciide_product_desc pciide_serverworks_products[] = {
	{ PCI_PRODUCT_RCC_OSB4_IDE,
	  0,
	  serverworks_chip_map,
	},
	{ PCI_PRODUCT_RCC_CSB5_IDE,
	  0,
	  serverworks_chip_map,
	},
	{ PCI_PRODUCT_RCC_CSB6_IDE,
	  0,
	  serverworks_chip_map,
	},
	{ PCI_PRODUCT_RCC_CSB6_IDE2,
	  0,
	  serverworks_chip_map,
	}
};

const struct pciide_product_desc pciide_nvidia_products[] = {
	{ PCI_PRODUCT_NVIDIA_NFORCE_IDE,
	  0,
	  nforce_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE2_IDE,
	  0,
	  nforce_chip_map
	}
};

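/*
 * Each per-vendor table above is tied to its PCI vendor ID in
 * pciide_vendors[] below; the entry count is computed with sizeof(), so a
 * new controller is supported simply by appending a product descriptor to
 * the right table (or by adding a new table plus one pciide_vendors[] line).
 */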
struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
	int ide_nproducts;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products,
	  sizeof(pciide_intel_products)/sizeof(pciide_intel_products[0]) },
	{ PCI_VENDOR_AMD, pciide_amd_products,
	  sizeof(pciide_amd_products)/sizeof(pciide_amd_products[0]) },
#ifdef notyet
	{ PCI_VENDOR_OPTI, pciide_opti_products,
	  sizeof(pciide_opti_products)/sizeof(pciide_opti_products[0]) },
#endif
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products,
	  sizeof(pciide_cmd_products)/sizeof(pciide_cmd_products[0]) },
	{ PCI_VENDOR_VIATECH, pciide_via_products,
	  sizeof(pciide_via_products)/sizeof(pciide_via_products[0]) },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products,
	  sizeof(pciide_cypress_products)/sizeof(pciide_cypress_products[0]) },
	{ PCI_VENDOR_SIS, pciide_sis_products,
	  sizeof(pciide_sis_products)/sizeof(pciide_sis_products[0]) },
	{ PCI_VENDOR_NS, pciide_natsemi_products,
	  sizeof(pciide_natsemi_products)/sizeof(pciide_natsemi_products[0]) },
	{ PCI_VENDOR_ALI, pciide_acer_products,
	  sizeof(pciide_acer_products)/sizeof(pciide_acer_products[0]) },
	{ PCI_VENDOR_TRIONES, pciide_triones_products,
	  sizeof(pciide_triones_products)/sizeof(pciide_triones_products[0]) },
	{ PCI_VENDOR_ACARD, pciide_acard_products,
	  sizeof(pciide_acard_products)/sizeof(pciide_acard_products[0]) },
	{ PCI_VENDOR_RCC, pciide_serverworks_products,
	  sizeof(pciide_serverworks_products)/sizeof(pciide_serverworks_products[0]) },
	{ PCI_VENDOR_PROMISE, pciide_promise_products,
	  sizeof(pciide_promise_products)/sizeof(pciide_promise_products[0]) },
	{ PCI_VENDOR_NVIDIA, pciide_nvidia_products,
	  sizeof(pciide_nvidia_products)/sizeof(pciide_nvidia_products[0]) }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01

#ifndef __OpenBSD__
int pciide_match(struct device *, struct cfdata *, void *);
#else
int pciide_match(struct device *, void *, void *);
#endif
void pciide_attach(struct device *, struct device *, void *);

struct cfattach pciide_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};

#ifdef __OpenBSD__
struct cfdriver pciide_cd = {
	NULL, "pciide", DV_DULL
};
#endif
int pciide_chipen(struct pciide_softc *, struct pci_attach_args *);
int pciide_mapregs_compat( struct pci_attach_args *,
    struct pciide_channel *, int, bus_size_t *, bus_size_t*);
int pciide_mapregs_native(struct pci_attach_args *,
    struct pciide_channel *, bus_size_t *, bus_size_t *,
    int (*pci_intr)(void *));
void pciide_mapreg_dma(struct pciide_softc *,
    struct pci_attach_args *);
int pciide_chansetup(struct pciide_softc *, int, pcireg_t);
void pciide_mapchan(struct pci_attach_args *,
    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
    int (*pci_intr)(void *));
int pciide_chan_candisable(struct pciide_channel *);
void pciide_map_compat_intr( struct pci_attach_args *,
    struct pciide_channel *, int, int);
void pciide_unmap_compat_intr( struct pci_attach_args *,
    struct pciide_channel *, int, int);
int pciide_compat_intr(void *);
int pciide_pci_intr(void *);
int pciide_intr_flag(struct pciide_channel *);

const struct pciide_product_desc* pciide_lookup_product(u_int32_t);

const struct pciide_product_desc *
pciide_lookup_product(id)
	u_int32_t id;
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;
	int i;

	for (i = 0, vp = pciide_vendors;
	    i < sizeof(pciide_vendors)/sizeof(pciide_vendors[0]);
	    vp++, i++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if (i == sizeof(pciide_vendors)/sizeof(pciide_vendors[0]))
		return NULL;

	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (i == vp->ide_nproducts)
		return NULL;
	return pp;
}
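/*
 * pciide_lookup_product() is handed the raw PCI ID register (vendor in the
 * low 16 bits, product in the high 16).  For an Intel PIIX4, for example,
 * it walks pciide_vendors[] to the Intel table and returns the
 * PCI_PRODUCT_INTEL_82371AB_IDE descriptor, so pciide_attach() below ends
 * up calling piix_chip_map() for the chip-specific setup.
 */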
int
pciide_match(parent, match, aux)
	struct device *parent;
#ifdef __OpenBSD__
	void *match;
#else
	struct cfdata *match;
#endif
	void *aux;
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Some IDE controllers have severe bugs when used in PCI mode.
	 * We punt and attach them to the ISA bus instead.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
		return (0);

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		return (1);
	}

	/*
	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
	 * controllers.  Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
		return (1);
	}

	return (0);
}

void
pciide_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	struct pciide_softc *sc = (struct pciide_softc *)self;
	pcireg_t csr;
	char devinfo[256];

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL) {
		sc->sc_pp = &default_product_desc;
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
		    sizeof devinfo);
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Set up DMA defaults; these might be adjusted by chip_map. */
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;

	WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc,
	    sc->sc_tag, pa->pa_class), DEBUG_PROBE);

	sc->sc_pp->chip_map(sc, pa);

	if (sc->sc_dma_ok) {
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	}

	WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n",
	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}

/* tell whether the chip is enabled or not */
int
pciide_chipen(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t csr;

	csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0) {
		printf("\n%s: device disabled\n",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		return 0;
	}

	return 1;
}

int
pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan;
	bus_size_t *cmdsizep, *ctlsizep;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	wdc_cp->cmd_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}

	return (1);
}
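/*
 * Register mapping comes in two flavours.  Compatibility channels live at
 * the fixed legacy addresses (PCIIDE_COMPAT_CMD_BASE/CTL_BASE, i.e.
 * 0x1f0/0x3f6 and 0x170/0x376 on the usual PC setup) and use the ISA-style
 * interrupts, which is why the mapping above ignores the BARs.  Native-PCI
 * channels, handled below, get their command and control blocks from
 * BARs 0-3 and share a single PCI interrupt for the whole controller.
 */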
int
pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr)(void *);
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;
	pcireg_t maptype;

	cp->compat = 0;

	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return 0;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
#ifdef __OpenBSD__
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc,
		    sc->sc_wdcdev.sc_dev.dv_xname);
#ifdef __pegasos__
		/* stupid broken board */
		if (intrhandle == 0xe)
			pci_intr_establish(pa->pa_pc,
			    0xf, IPL_BIO, pci_intr, sc,
			    sc->sc_wdcdev.sc_dev.dv_xname);
#endif
#else
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc);
#endif
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return 0;
		}
	}
	cp->ih = sc->sc_pci_ih;

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CMD_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CTL_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return 0;
	}
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register, the control register is at offset 2.  Pass the generic
	 * code a handle for only one byte at the right offset.
	 */
	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdc_cp->ctl_ioh) != 0) {
		printf("%s: unable to subregion %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
		return 0;
	}
	return (1);
}

void
pciide_mapreg_dma(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done.  If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", unused (registers at unsafe address %#lx)",
			    addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL, 0) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", (unsupported maptype 0x%x)", maptype);
		break;
	}
}
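/*
 * The bus-master block mapped above is the 16-byte region from the Bus
 * Master IDE programming interface: 8 bytes per channel, with the command
 * register at offset 0, the status register at offset 2 and the PRD table
 * pointer at offset 4 (the IDEDMA_CMD(), IDEDMA_CTL() and IDEDMA_TBL()
 * macros used throughout the rest of this file).
 */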
int
pciide_intr_flag(struct pciide_channel *cp)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	if (cp->dma_in_progress) {
		int retry = 10;
		int status;

		/* Check the status register */
		for (retry = 10; retry > 0; retry--) {
			status = bus_space_read_1(sc->sc_dma_iot,
			    sc->sc_dma_ioh,
			    IDEDMA_CTL(cp->wdc_channel.channel));
			if (status & IDEDMA_CTL_INTR) {
				break;
			}
			DELAY(5);
		}

		/* Not for us. */
		if (retry == 0)
			return (0);

		return (1);
	}

	return (-1);
}

int
pciide_compat_intr(arg)
	void *arg;
{
	struct pciide_channel *cp = arg;

	if (pciide_intr_flag(cp) == 0)
		return 0;

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}

int
pciide_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel skip. */
		if (cp->compat)
			continue;
		/* if this channel not waiting for intr, skip */
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
			continue;

		if (pciide_intr_flag(cp) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

void
pciide_channel_dma_setup(cp)
	struct pciide_channel *cp;
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

int
pciide_dma_table_setup(sc, channel, drive)
	struct pciide_softc *sc;
	int channel, drive;
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return 0;

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}

	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
	    seg.ds_addr), DEBUG_PROBE);
	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		printf("%s:%d: unable to create table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to load table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
	/* Create a xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
	    NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		printf("%s:%d: unable to create xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	return 0;
}
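/*
 * pciide_dma_init() below fills the idedma_table just allocated: each entry
 * is a physical region descriptor holding a 32-bit segment address and a
 * byte count, both stored little-endian, and the last entry is flagged with
 * IDEDMA_BYTE_COUNT_EOT so the controller knows where the list ends.  The
 * table itself must respect IDEDMA_TBL_ALIGN, which is why it was allocated
 * with that alignment and boundary above.
 */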
int
pciide_dma_init(v, channel, drive, databuf, datalen, flags)
	void *v;
	int channel, drive;
	void *databuf;
	size_t datalen;
	int flags;
{
	struct pciide_softc *sc = v;
	int error, seg;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];
#ifndef BUS_DMA_RAW
#define BUS_DMA_RAW 0
#endif

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW);
	if (error) {
		printf("%s:%d: unable to load xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, letoh32(dma_maps->dma_table[seg].byte_count),
		    letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Maps are ready.  Start DMA function */
#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
		    dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif

	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(channel),
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(channel)));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL(channel),
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(channel),
	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0);
	/* remember flags */
	dma_maps->dma_flags = flags;
	return 0;
}

void
pciide_dma_start(v, channel, drive)
	void *v;
	int channel, drive;
{
	struct pciide_softc *sc = v;

	WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(channel),
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(channel)) | IDEDMA_CMD_START);

	sc->pciide_channels[channel].dma_in_progress = 1;
}

int
pciide_dma_finish(v, channel, drive, force)
	void *v;
	int channel, drive;
	int force;
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	sc->pciide_channels[channel].dma_in_progress = 0;

	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(channel));
	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(channel),
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    0x00 : IDEDMA_CMD_WRITE);
	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(channel),
	    status);

	if ((status & IDEDMA_CTL_ERR) != 0) {
		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
		error |= WDC_DMAST_ERR;
	}

	if ((status & IDEDMA_CTL_INTR) == 0) {
		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}

void
pciide_irqack(chp)
	struct channel_softc *chp;
{
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/* clear status bits in IDE DMA registers */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chp->channel),
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chp->channel)));
}

/* some common code used by several chip_map */
int
pciide_chansetup(sc, channel, interface)
	struct pciide_softc *sc;
	int channel;
	pcireg_t interface;
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.channel = channel;
	cp->wdc_channel.wdc = &sc->sc_wdcdev;
	cp->wdc_channel.ch_queue =
	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		printf("%s: %s "
		    "cannot allocate memory for command queue",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}
	cp->hw_ok = 1;

	return 1;
}

/* some common code used by several chip channel_map */
void
pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	pcireg_t interface;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr)(void *);
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
		    pci_intr);
	else
		cp->hw_ok = pciide_mapregs_compat(pa, cp,
		    wdc_cp->channel, cmdsizep, ctlsizep);
	if (cp->hw_ok == 0)
		return;
	wdc_cp->data32iot = wdc_cp->cmd_iot;
	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
	wdcattach(wdc_cp);
}

/*
 * Generic code to call to know if a channel can be disabled.  Return 1
 * if channel can be disabled, 0 if not
 */
int
pciide_chan_candisable(cp)
	struct pciide_channel *cp;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
		printf("%s: %s disabled (no drives)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
		return 1;
	}
	return 0;
}
/*
 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
 * Set hw_ok=0 on failure
 */
void
pciide_map_compat_intr(pa, cp, compatchan, interface)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan, interface;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
		return;

	cp->compat = 1;
	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
	    pa, compatchan, pciide_compat_intr, cp);
	if (cp->ih == NULL) {
		printf("%s: no compatibility interrupt for use by %s\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
	}
}

/*
 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel.
 * Set hw_ok=0 on failure
 */
void
pciide_unmap_compat_intr(pa, cp, compatchan, interface)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan, interface;
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
		return;

	pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih);
}

void
pciide_print_channels(nchannels, interface)
	int nchannels;
	pcireg_t interface;
{
	int i;

	for (i = 0; i < nchannels; i++) {
		printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i),
		    (interface & PCIIDE_INTERFACE_SETTABLE(i)) ?
		    "configured" : "wired",
		    (interface & PCIIDE_INTERFACE_PCI(i)) ? "native-PCI" :
		    "compatibility");
	}

	printf("\n");
}

void
pciide_print_modes(cp)
	struct pciide_channel *cp;
{
	wdc_print_current_modes(&cp->wdc_channel);
}

void
default_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t csr;
	int channel, drive;
	struct ata_drive_datas *drvp;
	u_int8_t idedma_ctl;
	bus_size_t cmdsize, ctlsize;
	char *failreason;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
		printf(": DMA");
		if (sc->sc_pp == &default_product_desc &&
		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
		    PCIIDE_OPTIONS_DMA) == 0) {
			printf(" (unsupported)");
			sc->sc_dma_ok = 0;
		} else {
			pciide_mapreg_dma(sc, pa);
			if (sc->sc_dma_ok != 0)
				printf(", (partial support)");
		}
	} else {
		printf(": no DMA");
		sc->sc_dma_ok = 0;
	}
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 0;
	sc->sc_wdcdev.DMA_cap = 0;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;

	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, pciide_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp,
			    channel, &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			continue;
		/*
		 * Check to see if something appears to be there.
		 */
		failreason = NULL;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		if (!wdcprobe(&cp->wdc_channel)) {
			failreason = "not responding; disabled or no drives?";
			goto next;
		}
		/*
		 * Now, make sure it's actually attributable to this PCI IDE
		 * channel by trying to access the channel again while the
		 * PCI IDE controller's I/O space is disabled.  (If the
		 * channel no longer appears to be there, it belongs to
		 * this controller.)  YUCK!
		 */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
		    csr & ~PCI_COMMAND_IO_ENABLE);
		if (wdcprobe(&cp->wdc_channel))
			failreason = "other hardware responding at addresses";
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG, csr);
next:
		if (failreason) {
			printf("%s: %s ignored (%s)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
			    failreason);
			cp->hw_ok = 0;
			pciide_unmap_compat_intr(pa, cp, channel, interface);
			bus_space_unmap(cp->wdc_channel.cmd_iot,
			    cp->wdc_channel.cmd_ioh, cmdsize);
			bus_space_unmap(cp->wdc_channel.ctl_iot,
			    cp->wdc_channel.ctl_ioh, ctlsize);
		}
		if (cp->hw_ok) {
			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
			wdcattach(&cp->wdc_channel);
		}
	}

	if (sc->sc_dma_ok == 0)
		return;

	/* Allocate DMA maps */
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		idedma_ctl = 0;
		cp = &sc->pciide_channels[channel];
		for (drive = 0; drive < 2; drive++) {
			drvp = &cp->wdc_channel.ch_drive[drive];
			/* If no drive, skip */
			if ((drvp->drive_flags & DRIVE) == 0)
				continue;
			if ((drvp->drive_flags & DRIVE_DMA) == 0)
				continue;
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup */
				printf("%s:%d:%d: cannot allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive);
				drvp->drive_flags &= ~DRIVE_DMA;
			}
			printf("%s:%d:%d: using DMA data transfers\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    channel, drive);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		if (idedma_ctl != 0) {
			/* Add software bits in status register */
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL(channel),
			    idedma_ctl);
		}
	}
}

void
sata_setup_channel(struct channel_softc *chp)
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & DRIVE_DMA) {
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
	}
	/*
	 * Nothing to do to setup modes; it is meaningless in S-ATA
	 * (but many S-ATA drives still want to get the SET_FEATURE
	 * command).
	 */
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(chp->channel), idedma_ctl);
	}
	pciide_print_modes(cp);
}

void
piix_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	bus_size_t cmdsize, ctlsize;

	pcireg_t interface = PCI_INTERFACE(pa->pa_class);

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf(": DMA");
	pciide_mapreg_dma(sc, pa);
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82440MX_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
		case PCI_PRODUCT_INTEL_82801BAM_IDE:
		case PCI_PRODUCT_INTEL_82801BA_IDE:
		case PCI_PRODUCT_INTEL_82801CAM_IDE:
		case PCI_PRODUCT_INTEL_82801CA_IDE:
		case PCI_PRODUCT_INTEL_82801DB_IDE:
		case PCI_PRODUCT_INTEL_82801DBM_IDE:
		case PCI_PRODUCT_INTEL_82801EB_IDE:
		case PCI_PRODUCT_INTEL_82801EB_SATA:
		case PCI_PRODUCT_INTEL_82801ER_SATA:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			break;
		}
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_INTEL_82801AA_IDE:
		sc->sc_wdcdev.UDMA_cap = 4;
		break;
	case PCI_PRODUCT_INTEL_82801BAM_IDE:
	case PCI_PRODUCT_INTEL_82801BA_IDE:
	case PCI_PRODUCT_INTEL_82801CAM_IDE:
	case PCI_PRODUCT_INTEL_82801CA_IDE:
	case PCI_PRODUCT_INTEL_82801DB_IDE:
	case PCI_PRODUCT_INTEL_82801DBM_IDE:
	case PCI_PRODUCT_INTEL_82801EB_IDE:
	case PCI_PRODUCT_INTEL_82801EB_SATA:
	case PCI_PRODUCT_INTEL_82801ER_SATA:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	default:
		sc->sc_wdcdev.UDMA_cap = 2;
		break;
	}
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_SATA ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801ER_SATA) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_SATA;
		sc->sc_wdcdev.set_modes = sata_setup_channel;
	} else if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE) {
		sc->sc_wdcdev.set_modes = piix_setup_channel;
	} else {
		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
	}
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);

	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_SATA ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801ER_SATA)
		goto chansetup;

	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
(sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1800 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1801 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1802 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1803 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 1804 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 1805 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 1806 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 1807 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) { 1808 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1809 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1810 DEBUG_PROBE); 1811 } 1812 1813 } 1814 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1815 1816 chansetup: 1817 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1818 cp = &sc->pciide_channels[channel]; 1819 1820 /* SATA setup */ 1821 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_SATA || 1822 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801ER_SATA) { 1823 if (pciide_chansetup(sc, channel, interface) == 0) 1824 continue; 1825 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 1826 pciide_pci_intr); 1827 if (cp->hw_ok == 0) 1828 continue; 1829 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 1830 continue; 1831 } 1832 1833 /* PIIX is compat-only */ 1834 if (pciide_chansetup(sc, channel, 0) == 0) 1835 continue; 1836 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1837 if ((PIIX_IDETIM_READ(idetim, channel) & 1838 PIIX_IDETIM_IDE) == 0) { 1839 printf("%s: %s ignored (disabled)\n", 1840 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1841 continue; 1842 } 1843 /* PIIX are compat-only pciide devices */ 1844 pciide_map_compat_intr(pa, cp, channel, 0); 1845 if (cp->hw_ok == 0) 1846 continue; 1847 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr); 1848 if (cp->hw_ok == 0) 1849 goto next; 1850 if (pciide_chan_candisable(cp)) { 1851 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 1852 channel); 1853 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 1854 idetim); 1855 } 1856 if (cp->hw_ok == 0) 1857 goto next; 1858 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 1859 next: 1860 if (cp->hw_ok == 0) 1861 pciide_unmap_compat_intr(pa, cp, channel, 0); 1862 } 1863 1864 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_SATA || 1865 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801ER_SATA) 1866 return; 1867 1868 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 1869 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1870 DEBUG_PROBE); 1871 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1872 WDCDEBUG_PRINT((", sidetim=0x%x", 1873 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1874 DEBUG_PROBE); 1875 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1876 WDCDEBUG_PRINT((", udamreg 0x%x", 1877 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1878 DEBUG_PROBE); 1879 } 1880 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1881 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1882 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1883 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1884 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 1885 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 1886 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 1887 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 1888 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) { 1889 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1890 
pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}

void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess with the drives' modes: the PIIX can't use
	 * different timings for the master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives support DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		drvp[0].DMA_mode = mode[0];
		drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if the modes are not compatible
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If neither drive uses DMA, take the lower PIO mode, unless
	 * one of them is PIO mode < 2
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are now set up */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we get here, neither drive uses DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller.
Enable 1986 * it per-drive 1987 */ 1988 for (drive = 0; drive < 2; drive++) { 1989 /* If no drive, skip */ 1990 if ((drvp[drive].drive_flags & DRIVE) == 0) 1991 continue; 1992 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 1993 if (drvp[drive].drive_flags & DRIVE_DMA) 1994 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1995 } 1996 if (idedma_ctl != 0) { 1997 /* Add software bits in status register */ 1998 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1999 IDEDMA_CTL(chp->channel), 2000 idedma_ctl); 2001 } 2002 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2003 pciide_print_modes(cp); 2004 } 2005 2006 void 2007 piix3_4_setup_channel(chp) 2008 struct channel_softc *chp; 2009 { 2010 struct ata_drive_datas *drvp; 2011 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 2012 struct pciide_channel *cp = (struct pciide_channel*)chp; 2013 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2014 int drive; 2015 int channel = chp->channel; 2016 2017 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2018 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 2019 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 2020 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 2021 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 2022 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 2023 PIIX_SIDETIM_RTC_MASK(channel)); 2024 2025 idedma_ctl = 0; 2026 /* If channel disabled, no need to go further */ 2027 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 2028 return; 2029 /* set up new idetim: Enable IDE registers decode */ 2030 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 2031 2032 /* setup DMA if needed */ 2033 pciide_channel_dma_setup(cp); 2034 2035 for (drive = 0; drive < 2; drive++) { 2036 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 2037 PIIX_UDMATIM_SET(0x3, channel, drive)); 2038 drvp = &chp->ch_drive[drive]; 2039 /* If no drive, skip */ 2040 if ((drvp->drive_flags & DRIVE) == 0) 2041 continue; 2042 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2043 (drvp->drive_flags & DRIVE_UDMA) == 0)) 2044 goto pio; 2045 2046 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2047 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2048 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2049 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2050 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2051 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2052 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2053 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2054 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) { 2055 ideconf |= PIIX_CONFIG_PINGPONG; 2056 } 2057 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2058 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 2059 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 2060 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2061 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2062 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2063 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) { 2064 /* setup Ultra/100 */ 2065 if (drvp->UDMA_mode > 2 && 2066 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2067 drvp->UDMA_mode = 2; 2068 if (drvp->UDMA_mode > 4) { 2069 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 2070 } else { 2071 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 2072 if (drvp->UDMA_mode > 2) 
{ 2073 ideconf |= PIIX_CONFIG_UDMA66(channel, 2074 drive); 2075 } else { 2076 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 2077 drive); 2078 } 2079 } 2080 } 2081 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) { 2082 /* setup Ultra/66 */ 2083 if (drvp->UDMA_mode > 2 && 2084 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2085 drvp->UDMA_mode = 2; 2086 if (drvp->UDMA_mode > 2) 2087 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 2088 else 2089 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 2090 } 2091 2092 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2093 (drvp->drive_flags & DRIVE_UDMA)) { 2094 /* use Ultra/DMA */ 2095 drvp->drive_flags &= ~DRIVE_DMA; 2096 udmareg |= PIIX_UDMACTL_DRV_EN( channel,drive); 2097 udmareg |= PIIX_UDMATIM_SET( 2098 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 2099 } else { 2100 /* use Multiword DMA */ 2101 drvp->drive_flags &= ~DRIVE_UDMA; 2102 if (drive == 0) { 2103 idetim |= piix_setup_idetim_timings( 2104 drvp->DMA_mode, 1, channel); 2105 } else { 2106 sidetim |= piix_setup_sidetim_timings( 2107 drvp->DMA_mode, 1, channel); 2108 idetim =PIIX_IDETIM_SET(idetim, 2109 PIIX_IDETIM_SITRE, channel); 2110 } 2111 } 2112 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2113 2114 pio: /* use PIO mode */ 2115 idetim |= piix_setup_idetim_drvs(drvp); 2116 if (drive == 0) { 2117 idetim |= piix_setup_idetim_timings( 2118 drvp->PIO_mode, 0, channel); 2119 } else { 2120 sidetim |= piix_setup_sidetim_timings( 2121 drvp->PIO_mode, 0, channel); 2122 idetim =PIIX_IDETIM_SET(idetim, 2123 PIIX_IDETIM_SITRE, channel); 2124 } 2125 } 2126 if (idedma_ctl != 0) { 2127 /* Add software bits in status register */ 2128 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2129 IDEDMA_CTL(channel), 2130 idedma_ctl); 2131 } 2132 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2133 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 2134 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 2135 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 2136 pciide_print_modes(cp); 2137 } 2138 2139 2140 /* setup ISP and RTC fields, based on mode */ 2141 static u_int32_t 2142 piix_setup_idetim_timings(mode, dma, channel) 2143 u_int8_t mode; 2144 u_int8_t dma; 2145 u_int8_t channel; 2146 { 2147 2148 if (dma) 2149 return PIIX_IDETIM_SET(0, 2150 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 2151 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 2152 channel); 2153 else 2154 return PIIX_IDETIM_SET(0, 2155 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 2156 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 2157 channel); 2158 } 2159 2160 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 2161 static u_int32_t 2162 piix_setup_idetim_drvs(drvp) 2163 struct ata_drive_datas *drvp; 2164 { 2165 u_int32_t ret = 0; 2166 struct channel_softc *chp = drvp->chnl_softc; 2167 u_int8_t channel = chp->channel; 2168 u_int8_t drive = drvp->drive; 2169 2170 /* 2171 * If drive is using UDMA, timings setups are independant 2172 * So just check DMA and PIO here. 2173 */ 2174 if (drvp->drive_flags & DRIVE_DMA) { 2175 /* if mode = DMA mode 0, use compatible timings */ 2176 if ((drvp->drive_flags & DRIVE_DMA) && 2177 drvp->DMA_mode == 0) { 2178 drvp->PIO_mode = 0; 2179 return ret; 2180 } 2181 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 2182 /* 2183 * PIO and DMA timings are the same, use fast timings for PIO 2184 * too, else use compat timings. 
2185 */ 2186 if ((piix_isp_pio[drvp->PIO_mode] != 2187 piix_isp_dma[drvp->DMA_mode]) || 2188 (piix_rtc_pio[drvp->PIO_mode] != 2189 piix_rtc_dma[drvp->DMA_mode])) 2190 drvp->PIO_mode = 0; 2191 /* if PIO mode <= 2, use compat timings for PIO */ 2192 if (drvp->PIO_mode <= 2) { 2193 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 2194 channel); 2195 return ret; 2196 } 2197 } 2198 2199 /* 2200 * Now setup PIO modes. If mode < 2, use compat timings. 2201 * Else enable fast timings. Enable IORDY and prefetch/post 2202 * if PIO mode >= 3. 2203 */ 2204 2205 if (drvp->PIO_mode < 2) 2206 return ret; 2207 2208 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 2209 if (drvp->PIO_mode >= 3) { 2210 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 2211 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 2212 } 2213 return ret; 2214 } 2215 2216 /* setup values in SIDETIM registers, based on mode */ 2217 static u_int32_t 2218 piix_setup_sidetim_timings(mode, dma, channel) 2219 u_int8_t mode; 2220 u_int8_t dma; 2221 u_int8_t channel; 2222 { 2223 if (dma) 2224 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 2225 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel); 2226 else 2227 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 2228 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel); 2229 } 2230 2231 void 2232 amd756_chip_map(sc, pa) 2233 struct pciide_softc *sc; 2234 struct pci_attach_args *pa; 2235 { 2236 struct pciide_channel *cp; 2237 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2238 int channel; 2239 pcireg_t chanenable; 2240 bus_size_t cmdsize, ctlsize; 2241 2242 if (pciide_chipen(sc, pa) == 0) 2243 return; 2244 2245 printf(": DMA"); 2246 pciide_mapreg_dma(sc, pa); 2247 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2248 WDC_CAPABILITY_MODE; 2249 if (sc->sc_dma_ok) { 2250 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2251 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2252 sc->sc_wdcdev.irqack = pciide_irqack; 2253 } 2254 sc->sc_wdcdev.PIO_cap = 4; 2255 sc->sc_wdcdev.DMA_cap = 2; 2256 switch (sc->sc_pp->ide_product) { 2257 case PCI_PRODUCT_AMD_766_IDE: 2258 case PCI_PRODUCT_AMD_PBC768_IDE: 2259 case PCI_PRODUCT_AMD_8111_IDE: 2260 sc->sc_wdcdev.UDMA_cap = 5; 2261 break; 2262 default: 2263 sc->sc_wdcdev.UDMA_cap = 4; 2264 break; 2265 } 2266 sc->sc_wdcdev.set_modes = amd756_setup_channel; 2267 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2268 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2269 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 2270 2271 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2272 2273 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2274 cp = &sc->pciide_channels[channel]; 2275 if (pciide_chansetup(sc, channel, interface) == 0) 2276 continue; 2277 2278 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 2279 printf("%s: %s ignored (disabled)\n", 2280 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2281 continue; 2282 } 2283 pciide_map_compat_intr(pa, cp, channel, interface); 2284 if (cp->hw_ok == 0) 2285 continue; 2286 2287 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2288 pciide_pci_intr); 2289 2290 if (pciide_chan_candisable(cp)) { 2291 chanenable &= ~AMD756_CHAN_EN(channel); 2292 } 2293 if (cp->hw_ok == 0) { 2294 pciide_unmap_compat_intr(pa, cp, channel, interface); 2295 continue; 2296 } 2297 2298 amd756_setup_channel(&cp->wdc_channel); 2299 } 2300 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 2301 chanenable); 2302 return; 
2303 } 2304 2305 void 2306 amd756_setup_channel(chp) 2307 struct channel_softc *chp; 2308 { 2309 u_int32_t udmatim_reg, datatim_reg; 2310 u_int8_t idedma_ctl; 2311 int mode, drive; 2312 struct ata_drive_datas *drvp; 2313 struct pciide_channel *cp = (struct pciide_channel*)chp; 2314 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2315 pcireg_t chanenable; 2316 #ifndef PCIIDE_AMD756_ENABLEDMA 2317 int product = PCI_PRODUCT( 2318 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ID_REG)); 2319 int rev = PCI_REVISION( 2320 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 2321 #endif 2322 2323 idedma_ctl = 0; 2324 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 2325 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 2326 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 2327 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 2328 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 2329 AMD756_CHANSTATUS_EN); 2330 2331 /* setup DMA if needed */ 2332 pciide_channel_dma_setup(cp); 2333 2334 for (drive = 0; drive < 2; drive++) { 2335 drvp = &chp->ch_drive[drive]; 2336 /* If no drive, skip */ 2337 if ((drvp->drive_flags & DRIVE) == 0) 2338 continue; 2339 /* add timing values, setup DMA if needed */ 2340 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2341 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2342 mode = drvp->PIO_mode; 2343 goto pio; 2344 } 2345 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2346 (drvp->drive_flags & DRIVE_UDMA)) { 2347 /* use Ultra/DMA */ 2348 drvp->drive_flags &= ~DRIVE_DMA; 2349 2350 /* Check cable */ 2351 if ((chanenable & AMD756_CABLE(chp->channel, 2352 drive)) == 0 && drvp->UDMA_mode > 2) { 2353 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 2354 "cable not detected\n", drvp->drive_name, 2355 sc->sc_wdcdev.sc_dev.dv_xname, 2356 chp->channel, drive), DEBUG_PROBE); 2357 drvp->UDMA_mode = 2; 2358 } 2359 2360 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 2361 AMD756_UDMA_EN_MTH(chp->channel, drive) | 2362 AMD756_UDMA_TIME(chp->channel, drive, 2363 amd756_udma_tim[drvp->UDMA_mode]); 2364 /* can use PIO timings, MW DMA unused */ 2365 mode = drvp->PIO_mode; 2366 } else { 2367 /* use Multiword DMA, but only if revision is OK */ 2368 drvp->drive_flags &= ~DRIVE_UDMA; 2369 #ifndef PCIIDE_AMD756_ENABLEDMA 2370 /* 2371 * The workaround doesn't seem to be necessary 2372 * with all drives, so it can be disabled by 2373 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 2374 * triggered. 
2375 */ 2376 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 2377 printf("%s:%d:%d: multi-word DMA disabled due " 2378 "to chip revision\n", 2379 sc->sc_wdcdev.sc_dev.dv_xname, 2380 chp->channel, drive); 2381 mode = drvp->PIO_mode; 2382 drvp->drive_flags &= ~DRIVE_DMA; 2383 goto pio; 2384 } 2385 #endif 2386 /* mode = min(pio, dma+2) */ 2387 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2388 mode = drvp->PIO_mode; 2389 else 2390 mode = drvp->DMA_mode + 2; 2391 } 2392 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2393 2394 pio: /* setup PIO mode */ 2395 if (mode <= 2) { 2396 drvp->DMA_mode = 0; 2397 drvp->PIO_mode = 0; 2398 mode = 0; 2399 } else { 2400 drvp->PIO_mode = mode; 2401 drvp->DMA_mode = mode - 2; 2402 } 2403 datatim_reg |= 2404 AMD756_DATATIM_PULSE(chp->channel, drive, 2405 amd756_pio_set[mode]) | 2406 AMD756_DATATIM_RECOV(chp->channel, drive, 2407 amd756_pio_rec[mode]); 2408 } 2409 if (idedma_ctl != 0) { 2410 /* Add software bits in status register */ 2411 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2412 IDEDMA_CTL(chp->channel), 2413 idedma_ctl); 2414 } 2415 pciide_print_modes(cp); 2416 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 2417 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 2418 } 2419 2420 void 2421 apollo_chip_map(sc, pa) 2422 struct pciide_softc *sc; 2423 struct pci_attach_args *pa; 2424 { 2425 struct pciide_channel *cp; 2426 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2427 int channel; 2428 u_int32_t ideconf; 2429 bus_size_t cmdsize, ctlsize; 2430 pcitag_t pcib_tag; 2431 pcireg_t pcib_id, pcib_class; 2432 2433 if (pciide_chipen(sc, pa) == 0) 2434 return; 2435 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 2436 2437 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG); 2438 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG); 2439 2440 switch (PCI_PRODUCT(pcib_id)) { 2441 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 2442 if (PCI_REVISION(pcib_class) >= 0x02) { 2443 printf(": ATA33"); 2444 sc->sc_wdcdev.UDMA_cap = 2; 2445 } else { 2446 printf(": DMA"); 2447 sc->sc_wdcdev.UDMA_cap = 0; 2448 } 2449 break; 2450 case PCI_PRODUCT_VIATECH_VT82C596A: 2451 if (PCI_REVISION(pcib_class) >= 0x12) { 2452 printf(": ATA66"); 2453 sc->sc_wdcdev.UDMA_cap = 4; 2454 } else { 2455 printf(": ATA33"); 2456 sc->sc_wdcdev.UDMA_cap = 2; 2457 } 2458 break; 2459 2460 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 2461 if (PCI_REVISION(pcib_class) >= 0x40) { 2462 printf(": ATA100"); 2463 sc->sc_wdcdev.UDMA_cap = 5; 2464 } else { 2465 printf(": ATA66"); 2466 sc->sc_wdcdev.UDMA_cap = 4; 2467 } 2468 break; 2469 case PCI_PRODUCT_VIATECH_VT8231_ISA: 2470 printf(": ATA100"); 2471 sc->sc_wdcdev.UDMA_cap = 5; 2472 break; 2473 case PCI_PRODUCT_VIATECH_VT8366_ISA: 2474 printf(": ATA100"); 2475 sc->sc_wdcdev.UDMA_cap = 5; 2476 break; 2477 case PCI_PRODUCT_VIATECH_VT8233_ISA: 2478 printf(": ATA133"); 2479 sc->sc_wdcdev.UDMA_cap = 6; 2480 break; 2481 case PCI_PRODUCT_VIATECH_VT8235_ISA: 2482 printf(": ATA133"); 2483 sc->sc_wdcdev.UDMA_cap = 6; 2484 break; 2485 case PCI_PRODUCT_VIATECH_VT8237_SATA: 2486 printf(": ATA133"); 2487 sc->sc_wdcdev.UDMA_cap = 6; 2488 break; 2489 default: 2490 printf(": DMA"); 2491 sc->sc_wdcdev.UDMA_cap = 0; 2492 break; 2493 } 2494 2495 pciide_mapreg_dma(sc, pa); 2496 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2497 WDC_CAPABILITY_MODE; 2498 if (sc->sc_dma_ok) { 2499 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2500 sc->sc_wdcdev.irqack = pciide_irqack; 2501 if 
(sc->sc_wdcdev.UDMA_cap > 0) 2502 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2503 } 2504 sc->sc_wdcdev.PIO_cap = 4; 2505 sc->sc_wdcdev.DMA_cap = 2; 2506 sc->sc_wdcdev.set_modes = apollo_setup_channel; 2507 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2508 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2509 2510 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2511 2512 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 2513 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2514 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 2515 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 2516 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2517 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 2518 DEBUG_PROBE); 2519 2520 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2521 cp = &sc->pciide_channels[channel]; 2522 if (pciide_chansetup(sc, channel, interface) == 0) 2523 continue; 2524 2525 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 2526 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 2527 printf("%s: %s ignored (disabled)\n", 2528 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2529 continue; 2530 } 2531 pciide_map_compat_intr(pa, cp, channel, interface); 2532 if (cp->hw_ok == 0) 2533 continue; 2534 2535 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2536 pciide_pci_intr); 2537 if (cp->hw_ok == 0) { 2538 goto next; 2539 } 2540 if (pciide_chan_candisable(cp)) { 2541 ideconf &= ~APO_IDECONF_EN(channel); 2542 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 2543 ideconf); 2544 } 2545 2546 if (cp->hw_ok == 0) 2547 goto next; 2548 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 2549 next: 2550 if (cp->hw_ok == 0) 2551 pciide_unmap_compat_intr(pa, cp, channel, interface); 2552 } 2553 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2554 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2555 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 2556 } 2557 2558 void 2559 apollo_sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2560 { 2561 struct pciide_channel *cp; 2562 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2563 int channel; 2564 bus_size_t cmdsize, ctlsize; 2565 2566 if (pciide_chipen(sc, pa) == 0) 2567 return; 2568 2569 if (interface == 0) { 2570 WDCDEBUG_PRINT(("apollo_sata_chip_map interface == 0\n"), 2571 DEBUG_PROBE); 2572 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2573 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 2574 } 2575 2576 printf(": DMA"); 2577 pciide_mapreg_dma(sc, pa); 2578 printf("\n"); 2579 2580 if (sc->sc_dma_ok) { 2581 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2582 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2583 sc->sc_wdcdev.irqack = pciide_irqack; 2584 } 2585 sc->sc_wdcdev.PIO_cap = 4; 2586 sc->sc_wdcdev.DMA_cap = 2; 2587 sc->sc_wdcdev.UDMA_cap = 6; 2588 2589 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2590 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2591 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2592 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2593 sc->sc_wdcdev.set_modes = sata_setup_channel; 2594 2595 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2596 cp = &sc->pciide_channels[channel]; 2597 if (pciide_chansetup(sc, channel, interface) == 0) 2598 continue; 2599 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2600 pciide_pci_intr); 2601 } 2602 } 2603 2604 void 2605 apollo_setup_channel(chp) 2606 struct channel_softc *chp; 2607 { 2608 u_int32_t udmatim_reg, datatim_reg; 2609 
u_int8_t idedma_ctl; 2610 int mode, drive; 2611 struct ata_drive_datas *drvp; 2612 struct pciide_channel *cp = (struct pciide_channel*)chp; 2613 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2614 2615 idedma_ctl = 0; 2616 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 2617 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 2618 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 2619 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 2620 2621 /* setup DMA if needed */ 2622 pciide_channel_dma_setup(cp); 2623 2624 /* 2625 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 2626 * downgrade to Ultra/33 if needed 2627 */ 2628 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 2629 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 2630 /* both drives UDMA */ 2631 if (chp->ch_drive[0].UDMA_mode > 2 && 2632 chp->ch_drive[1].UDMA_mode <= 2) { 2633 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 2634 chp->ch_drive[0].UDMA_mode = 2; 2635 } else if (chp->ch_drive[1].UDMA_mode > 2 && 2636 chp->ch_drive[0].UDMA_mode <= 2) { 2637 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 2638 chp->ch_drive[1].UDMA_mode = 2; 2639 } 2640 } 2641 2642 for (drive = 0; drive < 2; drive++) { 2643 drvp = &chp->ch_drive[drive]; 2644 /* If no drive, skip */ 2645 if ((drvp->drive_flags & DRIVE) == 0) 2646 continue; 2647 /* add timing values, setup DMA if needed */ 2648 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2649 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2650 mode = drvp->PIO_mode; 2651 goto pio; 2652 } 2653 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2654 (drvp->drive_flags & DRIVE_UDMA)) { 2655 /* use Ultra/DMA */ 2656 drvp->drive_flags &= ~DRIVE_DMA; 2657 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 2658 APO_UDMA_EN_MTH(chp->channel, drive); 2659 if (sc->sc_wdcdev.UDMA_cap == 6) { 2660 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2661 drive, apollo_udma133_tim[drvp->UDMA_mode]); 2662 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 2663 /* 686b */ 2664 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2665 drive, apollo_udma100_tim[drvp->UDMA_mode]); 2666 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 2667 /* 596b or 686a */ 2668 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 2669 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2670 drive, apollo_udma66_tim[drvp->UDMA_mode]); 2671 } else { 2672 /* 596a or 586b */ 2673 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2674 drive, apollo_udma33_tim[drvp->UDMA_mode]); 2675 } 2676 /* can use PIO timings, MW DMA unused */ 2677 mode = drvp->PIO_mode; 2678 } else { 2679 /* use Multiword DMA */ 2680 drvp->drive_flags &= ~DRIVE_UDMA; 2681 /* mode = min(pio, dma+2) */ 2682 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2683 mode = drvp->PIO_mode; 2684 else 2685 mode = drvp->DMA_mode + 2; 2686 } 2687 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2688 2689 pio: /* setup PIO mode */ 2690 if (mode <= 2) { 2691 drvp->DMA_mode = 0; 2692 drvp->PIO_mode = 0; 2693 mode = 0; 2694 } else { 2695 drvp->PIO_mode = mode; 2696 drvp->DMA_mode = mode - 2; 2697 } 2698 datatim_reg |= 2699 APO_DATATIM_PULSE(chp->channel, drive, 2700 apollo_pio_set[mode]) | 2701 APO_DATATIM_RECOV(chp->channel, drive, 2702 apollo_pio_rec[mode]); 2703 } 2704 if (idedma_ctl != 0) { 2705 /* Add software bits in status register */ 2706 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2707 IDEDMA_CTL(chp->channel), 2708 idedma_ctl); 2709 } 2710 pciide_print_modes(cp); 2711 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 2712 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 2713 } 2714 2715 
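
/*
 * Illustrative sketch only (not used by the driver): several of the
 * chip-specific setup routines in this file share one set of timing
 * tables between PIO and multiword DMA, indexing them by PIO mode and
 * treating multiword DMA mode N as roughly equivalent to PIO mode N+2.
 * The effective table index is therefore min(PIO_mode, DMA_mode + 2),
 * with modes <= 2 falling back to the compatible (slowest) timings.
 * The hypothetical helper below merely restates that rule and is
 * compiled out.
 */
#if 0
static int
pciide_shared_timing_index(int pio_mode, int dma_mode)
{
	int mode;

	/* mode = min(pio, dma + 2), as in amd756/apollo_setup_channel */
	if (pio_mode <= dma_mode + 2)
		mode = pio_mode;
	else
		mode = dma_mode + 2;

	/* modes <= 2 use the compatible timings (table entry 0) */
	if (mode <= 2)
		mode = 0;

	return (mode);
}
#endif
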
void 2716 cmd_channel_map(pa, sc, channel) 2717 struct pci_attach_args *pa; 2718 struct pciide_softc *sc; 2719 int channel; 2720 { 2721 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2722 bus_size_t cmdsize, ctlsize; 2723 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 2724 pcireg_t interface; 2725 int one_channel; 2726 2727 /* 2728 * The 0648/0649 can be told to identify as a RAID controller. 2729 * In this case, we have to fake interface 2730 */ 2731 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2732 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2733 PCIIDE_INTERFACE_SETTABLE(1); 2734 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2735 CMD_CONF_DSA1) 2736 interface |= PCIIDE_INTERFACE_PCI(0) | 2737 PCIIDE_INTERFACE_PCI(1); 2738 } else { 2739 interface = PCI_INTERFACE(pa->pa_class); 2740 } 2741 2742 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2743 cp->name = PCIIDE_CHANNEL_NAME(channel); 2744 cp->wdc_channel.channel = channel; 2745 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2746 2747 /* 2748 * Older CMD64X doesn't have independant channels 2749 */ 2750 switch (sc->sc_pp->ide_product) { 2751 case PCI_PRODUCT_CMDTECH_649: 2752 one_channel = 0; 2753 break; 2754 default: 2755 one_channel = 1; 2756 break; 2757 } 2758 2759 if (channel > 0 && one_channel) { 2760 cp->wdc_channel.ch_queue = 2761 sc->pciide_channels[0].wdc_channel.ch_queue; 2762 } else { 2763 cp->wdc_channel.ch_queue = 2764 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2765 } 2766 if (cp->wdc_channel.ch_queue == NULL) { 2767 printf( 2768 "%s: %s cannot allocate memory for command queue", 2769 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2770 return; 2771 } 2772 2773 /* 2774 * with a CMD PCI64x, if we get here, the first channel is enabled: 2775 * there's no way to disable the first channel without disabling 2776 * the whole device 2777 */ 2778 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 2779 printf("%s: %s ignored (disabled)\n", 2780 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2781 return; 2782 } 2783 cp->hw_ok = 1; 2784 pciide_map_compat_intr(pa, cp, channel, interface); 2785 if (cp->hw_ok == 0) 2786 return; 2787 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 2788 if (cp->hw_ok == 0) { 2789 pciide_unmap_compat_intr(pa, cp, channel, interface); 2790 return; 2791 } 2792 if (pciide_chan_candisable(cp)) { 2793 if (channel == 1) { 2794 ctrl &= ~CMD_CTRL_2PORT; 2795 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2796 CMD_CTRL, ctrl); 2797 pciide_unmap_compat_intr(pa, cp, channel, interface); 2798 } 2799 } 2800 } 2801 2802 int 2803 cmd_pci_intr(arg) 2804 void *arg; 2805 { 2806 struct pciide_softc *sc = arg; 2807 struct pciide_channel *cp; 2808 struct channel_softc *wdc_cp; 2809 int i, rv, crv; 2810 u_int32_t priirq, secirq; 2811 2812 rv = 0; 2813 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2814 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2815 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2816 cp = &sc->pciide_channels[i]; 2817 wdc_cp = &cp->wdc_channel; 2818 /* If a compat channel skip. 
*/ 2819 if (cp->compat) 2820 continue; 2821 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 2822 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 2823 crv = wdcintr(wdc_cp); 2824 if (crv == 0) { 2825 #if 0 2826 printf("%s:%d: bogus intr\n", 2827 sc->sc_wdcdev.sc_dev.dv_xname, i); 2828 #endif 2829 } else 2830 rv = 1; 2831 } 2832 } 2833 return rv; 2834 } 2835 2836 void 2837 cmd_chip_map(sc, pa) 2838 struct pciide_softc *sc; 2839 struct pci_attach_args *pa; 2840 { 2841 int channel; 2842 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2843 /* 2844 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2845 * and base address registers can be disabled at 2846 * hardware level. In this case, the device is wired 2847 * in compat mode and its first channel is always enabled, 2848 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2849 * In fact, it seems that the first channel of the CMD PCI0640 2850 * can't be disabled. 2851 */ 2852 2853 #ifdef PCIIDE_CMD064x_DISABLE 2854 if (pciide_chipen(sc, pa) == 0) 2855 return; 2856 #endif 2857 2858 printf(": no DMA"); 2859 sc->sc_dma_ok = 0; 2860 2861 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2862 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2863 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 2864 2865 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2866 2867 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2868 cmd_channel_map(pa, sc, channel); 2869 } 2870 } 2871 2872 void 2873 cmd0643_9_chip_map(sc, pa) 2874 struct pciide_softc *sc; 2875 struct pci_attach_args *pa; 2876 { 2877 struct pciide_channel *cp; 2878 int channel; 2879 int rev = PCI_REVISION( 2880 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 2881 pcireg_t interface; 2882 2883 /* 2884 * The 0648/0649 can be told to identify as a RAID controller. 2885 * In this case, we have to fake interface 2886 */ 2887 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2888 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2889 PCIIDE_INTERFACE_SETTABLE(1); 2890 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2891 CMD_CONF_DSA1) 2892 interface |= PCIIDE_INTERFACE_PCI(0) | 2893 PCIIDE_INTERFACE_PCI(1); 2894 } else { 2895 interface = PCI_INTERFACE(pa->pa_class); 2896 } 2897 2898 /* 2899 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2900 * and base address registers can be disabled at 2901 * hardware level. In this case, the device is wired 2902 * in compat mode and its first channel is always enabled, 2903 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2904 * In fact, it seems that the first channel of the CMD PCI0640 2905 * can't be disabled. 
2906 */ 2907 2908 #ifdef PCIIDE_CMD064x_DISABLE 2909 if (pciide_chipen(sc, pa) == 0) 2910 return; 2911 #endif 2912 printf(": DMA"); 2913 pciide_mapreg_dma(sc, pa); 2914 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2915 WDC_CAPABILITY_MODE; 2916 if (sc->sc_dma_ok) { 2917 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2918 switch (sc->sc_pp->ide_product) { 2919 case PCI_PRODUCT_CMDTECH_649: 2920 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2921 sc->sc_wdcdev.UDMA_cap = 5; 2922 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2923 break; 2924 case PCI_PRODUCT_CMDTECH_648: 2925 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2926 sc->sc_wdcdev.UDMA_cap = 4; 2927 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2928 break; 2929 case PCI_PRODUCT_CMDTECH_646: 2930 if (rev >= CMD0646U2_REV) { 2931 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2932 sc->sc_wdcdev.UDMA_cap = 2; 2933 } else if (rev >= CMD0646U_REV) { 2934 /* 2935 * Linux's driver claims that the 646U is broken 2936 * with UDMA. Only enable it if we know what we're 2937 * doing 2938 */ 2939 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 2940 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2941 sc->sc_wdcdev.UDMA_cap = 2; 2942 #endif 2943 /* explicitly disable UDMA */ 2944 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2945 CMD_UDMATIM(0), 0); 2946 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2947 CMD_UDMATIM(1), 0); 2948 } 2949 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2950 break; 2951 default: 2952 sc->sc_wdcdev.irqack = pciide_irqack; 2953 } 2954 } 2955 2956 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2957 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2958 sc->sc_wdcdev.PIO_cap = 4; 2959 sc->sc_wdcdev.DMA_cap = 2; 2960 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 2961 2962 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2963 2964 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 2965 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2966 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2967 DEBUG_PROBE); 2968 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2969 cp = &sc->pciide_channels[channel]; 2970 cmd_channel_map(pa, sc, channel); 2971 if (cp->hw_ok == 0) 2972 continue; 2973 cmd0643_9_setup_channel(&cp->wdc_channel); 2974 } 2975 /* 2976 * note - this also makes sure we clear the irq disable and reset 2977 * bits 2978 */ 2979 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 2980 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 2981 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2982 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2983 DEBUG_PROBE); 2984 } 2985 2986 void 2987 cmd0643_9_setup_channel(chp) 2988 struct channel_softc *chp; 2989 { 2990 struct ata_drive_datas *drvp; 2991 u_int8_t tim; 2992 u_int32_t idedma_ctl, udma_reg; 2993 int drive; 2994 struct pciide_channel *cp = (struct pciide_channel*)chp; 2995 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2996 2997 idedma_ctl = 0; 2998 /* setup DMA if needed */ 2999 pciide_channel_dma_setup(cp); 3000 3001 for (drive = 0; drive < 2; drive++) { 3002 drvp = &chp->ch_drive[drive]; 3003 /* If no drive, skip */ 3004 if ((drvp->drive_flags & DRIVE) == 0) 3005 continue; 3006 /* add timing values, setup DMA if needed */ 3007 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 3008 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 3009 if (drvp->drive_flags & DRIVE_UDMA) { 3010 /* UltraDMA on a 646U2, 0648 or 0649 */ 3011 drvp->drive_flags &= ~DRIVE_DMA; 3012 udma_reg = pciide_pci_read(sc->sc_pc, 3013 sc->sc_tag, 
CMD_UDMATIM(chp->channel)); 3014 if (drvp->UDMA_mode > 2 && 3015 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3016 CMD_BICSR) & 3017 CMD_BICSR_80(chp->channel)) == 0) { 3018 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 3019 "80-wire cable not detected\n", 3020 drvp->drive_name, 3021 sc->sc_wdcdev.sc_dev.dv_xname, 3022 chp->channel, drive), DEBUG_PROBE); 3023 drvp->UDMA_mode = 2; 3024 } 3025 if (drvp->UDMA_mode > 2) 3026 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 3027 else if (sc->sc_wdcdev.UDMA_cap > 2) 3028 udma_reg |= CMD_UDMATIM_UDMA33(drive); 3029 udma_reg |= CMD_UDMATIM_UDMA(drive); 3030 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 3031 CMD_UDMATIM_TIM_OFF(drive)); 3032 udma_reg |= 3033 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 3034 CMD_UDMATIM_TIM_OFF(drive)); 3035 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3036 CMD_UDMATIM(chp->channel), udma_reg); 3037 } else { 3038 /* 3039 * use Multiword DMA. 3040 * Timings will be used for both PIO and DMA, 3041 * so adjust DMA mode if needed 3042 * if we have a 0646U2/8/9, turn off UDMA 3043 */ 3044 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 3045 udma_reg = pciide_pci_read(sc->sc_pc, 3046 sc->sc_tag, 3047 CMD_UDMATIM(chp->channel)); 3048 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 3049 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3050 CMD_UDMATIM(chp->channel), 3051 udma_reg); 3052 } 3053 if (drvp->PIO_mode >= 3 && 3054 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3055 drvp->DMA_mode = drvp->PIO_mode - 2; 3056 } 3057 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 3058 } 3059 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3060 } 3061 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3062 CMD_DATA_TIM(chp->channel, drive), tim); 3063 } 3064 if (idedma_ctl != 0) { 3065 /* Add software bits in status register */ 3066 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3067 IDEDMA_CTL(chp->channel), 3068 idedma_ctl); 3069 } 3070 pciide_print_modes(cp); 3071 #ifdef __sparc64__ 3072 /* 3073 * The Ultra 5 has a tendency to hang during reboot. This is due 3074 * to the PCI0646U asserting a PCI interrupt line when the chip 3075 * registers claim that it is not. Performing a reset at this 3076 * point appears to eliminate the symptoms. It is likely the 3077 * real cause is still lurking somewhere in the code. 
3078 */ 3079 wdcreset(chp, SILENT); 3080 #endif /* __sparc64__ */ 3081 } 3082 3083 void 3084 cmd646_9_irqack(chp) 3085 struct channel_softc *chp; 3086 { 3087 u_int32_t priirq, secirq; 3088 struct pciide_channel *cp = (struct pciide_channel*)chp; 3089 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3090 3091 if (chp->channel == 0) { 3092 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3093 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 3094 } else { 3095 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3096 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 3097 } 3098 pciide_irqack(chp); 3099 } 3100 3101 void 3102 cmd680_chip_map(sc, pa) 3103 struct pciide_softc *sc; 3104 struct pci_attach_args *pa; 3105 { 3106 struct pciide_channel *cp; 3107 int channel; 3108 3109 if (pciide_chipen(sc, pa) == 0) 3110 return; 3111 printf("\n%s: bus-master DMA support present", 3112 sc->sc_wdcdev.sc_dev.dv_xname); 3113 pciide_mapreg_dma(sc, pa); 3114 printf("\n"); 3115 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3116 WDC_CAPABILITY_MODE; 3117 if (sc->sc_dma_ok) { 3118 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3119 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3120 sc->sc_wdcdev.UDMA_cap = 6; 3121 sc->sc_wdcdev.irqack = pciide_irqack; 3122 } 3123 3124 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3125 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3126 sc->sc_wdcdev.PIO_cap = 4; 3127 sc->sc_wdcdev.DMA_cap = 2; 3128 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 3129 3130 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 3131 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 3132 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 3133 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 3134 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3135 cp = &sc->pciide_channels[channel]; 3136 cmd680_channel_map(pa, sc, channel); 3137 if (cp->hw_ok == 0) 3138 continue; 3139 cmd680_setup_channel(&cp->wdc_channel); 3140 } 3141 } 3142 3143 void 3144 cmd680_channel_map(pa, sc, channel) 3145 struct pci_attach_args *pa; 3146 struct pciide_softc *sc; 3147 int channel; 3148 { 3149 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3150 bus_size_t cmdsize, ctlsize; 3151 int interface, i, reg; 3152 static const u_int8_t init_val[] = 3153 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 3154 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 3155 3156 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3157 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3158 PCIIDE_INTERFACE_SETTABLE(1); 3159 interface |= PCIIDE_INTERFACE_PCI(0) | 3160 PCIIDE_INTERFACE_PCI(1); 3161 } else { 3162 interface = PCI_INTERFACE(pa->pa_class); 3163 } 3164 3165 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3166 cp->name = PCIIDE_CHANNEL_NAME(channel); 3167 cp->wdc_channel.channel = channel; 3168 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3169 3170 cp->wdc_channel.ch_queue = 3171 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 3172 if (cp->wdc_channel.ch_queue == NULL) { 3173 printf("%s %s: " 3174 "can't allocate memory for command queue", 3175 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3176 return; 3177 } 3178 3179 /* XXX */ 3180 reg = 0xa2 + channel * 16; 3181 for (i = 0; i < sizeof(init_val); i++) 3182 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 3183 3184 printf("%s: %s %s to %s mode\n", 3185 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 3186 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 
3187 "configured" : "wired", 3188 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 3189 "native-PCI" : "compatibility"); 3190 3191 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 3192 if (cp->hw_ok == 0) 3193 return; 3194 pciide_map_compat_intr(pa, cp, channel, interface); 3195 } 3196 3197 void 3198 cmd680_setup_channel(chp) 3199 struct channel_softc *chp; 3200 { 3201 struct ata_drive_datas *drvp; 3202 u_int8_t mode, off, scsc; 3203 u_int16_t val; 3204 u_int32_t idedma_ctl; 3205 int drive; 3206 struct pciide_channel *cp = (struct pciide_channel*)chp; 3207 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3208 pci_chipset_tag_t pc = sc->sc_pc; 3209 pcitag_t pa = sc->sc_tag; 3210 static const u_int8_t udma2_tbl[] = 3211 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 3212 static const u_int8_t udma_tbl[] = 3213 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 3214 static const u_int16_t dma_tbl[] = 3215 { 0x2208, 0x10c2, 0x10c1 }; 3216 static const u_int16_t pio_tbl[] = 3217 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 3218 3219 idedma_ctl = 0; 3220 pciide_channel_dma_setup(cp); 3221 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 3222 3223 for (drive = 0; drive < 2; drive++) { 3224 drvp = &chp->ch_drive[drive]; 3225 /* If no drive, skip */ 3226 if ((drvp->drive_flags & DRIVE) == 0) 3227 continue; 3228 mode &= ~(0x03 << (drive * 4)); 3229 if (drvp->drive_flags & DRIVE_UDMA) { 3230 drvp->drive_flags &= ~DRIVE_DMA; 3231 off = 0xa0 + chp->channel * 16; 3232 if (drvp->UDMA_mode > 2 && 3233 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 3234 drvp->UDMA_mode = 2; 3235 scsc = pciide_pci_read(pc, pa, 0x8a); 3236 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 3237 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 3238 scsc = pciide_pci_read(pc, pa, 0x8a); 3239 if ((scsc & 0x30) == 0) 3240 drvp->UDMA_mode = 5; 3241 } 3242 mode |= 0x03 << (drive * 4); 3243 off = 0xac + chp->channel * 16 + drive * 2; 3244 val = pciide_pci_read(pc, pa, off) & ~0x3f; 3245 if (scsc & 0x30) 3246 val |= udma2_tbl[drvp->UDMA_mode]; 3247 else 3248 val |= udma_tbl[drvp->UDMA_mode]; 3249 pciide_pci_write(pc, pa, off, val); 3250 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3251 } else if (drvp->drive_flags & DRIVE_DMA) { 3252 mode |= 0x02 << (drive * 4); 3253 off = 0xa8 + chp->channel * 16 + drive * 2; 3254 val = dma_tbl[drvp->DMA_mode]; 3255 pciide_pci_write(pc, pa, off, val & 0xff); 3256 pciide_pci_write(pc, pa, off, val >> 8); 3257 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3258 } else { 3259 mode |= 0x01 << (drive * 4); 3260 off = 0xa4 + chp->channel * 16 + drive * 2; 3261 val = pio_tbl[drvp->PIO_mode]; 3262 pciide_pci_write(pc, pa, off, val & 0xff); 3263 pciide_pci_write(pc, pa, off, val >> 8); 3264 } 3265 } 3266 3267 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode); 3268 if (idedma_ctl != 0) { 3269 /* Add software bits in status register */ 3270 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3271 IDEDMA_CTL(chp->channel), 3272 idedma_ctl); 3273 } 3274 pciide_print_modes(cp); 3275 } 3276 3277 void 3278 sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3279 { 3280 struct pciide_channel *cp; 3281 bus_size_t cmdsize, ctlsize; 3282 pcireg_t interface; 3283 int channel; 3284 3285 if (pciide_chipen(sc, pa) == 0) 3286 return; 3287 3288 printf(": DMA"); 3289 pciide_mapreg_dma(sc, pa); 3290 3291 /* 3292 * Rev. <= 0x01 of the 3112 have a bug that can cause data 3293 * corruption if DMA transfers cross an 8K boundary. 
This is 3294 * apparently hard to tickle, but we'll go ahead and play it 3295 * safe. 3296 */ 3297 if (PCI_REVISION(pa->pa_class) <= 0x01) { 3298 sc->sc_dma_maxsegsz = 8192; 3299 sc->sc_dma_boundary = 8192; 3300 } 3301 3302 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3303 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 3304 sc->sc_wdcdev.PIO_cap = 4; 3305 if (sc->sc_dma_ok) { 3306 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3307 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3308 sc->sc_wdcdev.irqack = pciide_irqack; 3309 sc->sc_wdcdev.DMA_cap = 2; 3310 sc->sc_wdcdev.UDMA_cap = 6; 3311 } 3312 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 3313 3314 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3315 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3316 3317 /* 3318 * The 3112 can be told to identify as a RAID controller. 3319 * In this case, we have to fake interface 3320 */ 3321 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3322 interface = PCI_INTERFACE(pa->pa_class); 3323 } else { 3324 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3325 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3326 } 3327 3328 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3329 3330 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3331 cp = &sc->pciide_channels[channel]; 3332 if (pciide_chansetup(sc, channel, interface) == 0) 3333 continue; 3334 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3335 pciide_pci_intr); 3336 if (cp->hw_ok == 0) 3337 continue; 3338 pciide_map_compat_intr(pa, cp, channel, interface); 3339 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 3340 } 3341 } 3342 3343 void 3344 sii3112_setup_channel(struct channel_softc *chp) 3345 { 3346 struct ata_drive_datas *drvp; 3347 int drive; 3348 u_int32_t idedma_ctl, dtm; 3349 struct pciide_channel *cp = (struct pciide_channel*)chp; 3350 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc; 3351 3352 /* setup DMA if needed */ 3353 pciide_channel_dma_setup(cp); 3354 3355 idedma_ctl = 0; 3356 dtm = 0; 3357 3358 for (drive = 0; drive < 2; drive++) { 3359 drvp = &chp->ch_drive[drive]; 3360 /* If no drive, skip */ 3361 if ((drvp->drive_flags & DRIVE) == 0) 3362 continue; 3363 if (drvp->drive_flags & DRIVE_UDMA) { 3364 /* use Ultra/DMA */ 3365 drvp->drive_flags &= ~DRIVE_DMA; 3366 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3367 dtm |= DTM_IDEx_DMA; 3368 } else if (drvp->drive_flags & DRIVE_DMA) { 3369 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3370 dtm |= DTM_IDEx_DMA; 3371 } else { 3372 dtm |= DTM_IDEx_PIO; 3373 } 3374 } 3375 3376 /* 3377 * Nothing to do to setup modes; it is meaningless in S-ATA 3378 * (but many S-ATA drives still want to get the SET_FEATURE 3379 * command). 3380 */ 3381 if (idedma_ctl != 0) { 3382 /* Add software bits in status register */ 3383 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3384 IDEDMA_CTL(chp->channel), idedma_ctl); 3385 } 3386 pci_conf_write(sc->sc_pc, sc->sc_tag, 3387 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm); 3388 pciide_print_modes(cp); 3389 } 3390 3391 void 3392 cy693_chip_map(sc, pa) 3393 struct pciide_softc *sc; 3394 struct pci_attach_args *pa; 3395 { 3396 struct pciide_channel *cp; 3397 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3398 bus_size_t cmdsize, ctlsize; 3399 3400 if (pciide_chipen(sc, pa) == 0) 3401 return; 3402 /* 3403 * this chip has 2 PCI IDE functions, one for primary and one for 3404 * secondary. 
So we need to call pciide_mapregs_compat() with 3405 * the real channel 3406 */ 3407 if (pa->pa_function == 1) { 3408 sc->sc_cy_compatchan = 0; 3409 } else if (pa->pa_function == 2) { 3410 sc->sc_cy_compatchan = 1; 3411 } else { 3412 printf(": unexpected PCI function %d\n", pa->pa_function); 3413 return; 3414 } 3415 3416 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 3417 printf(": DMA"); 3418 pciide_mapreg_dma(sc, pa); 3419 } else { 3420 printf(": no DMA"); 3421 sc->sc_dma_ok = 0; 3422 } 3423 3424 sc->sc_cy_handle = cy82c693_init(pa->pa_iot); 3425 if (sc->sc_cy_handle == NULL) { 3426 printf(", (unable to map ctl registers)"); 3427 sc->sc_dma_ok = 0; 3428 } 3429 3430 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3431 WDC_CAPABILITY_MODE; 3432 if (sc->sc_dma_ok) { 3433 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3434 sc->sc_wdcdev.irqack = pciide_irqack; 3435 } 3436 sc->sc_wdcdev.PIO_cap = 4; 3437 sc->sc_wdcdev.DMA_cap = 2; 3438 sc->sc_wdcdev.set_modes = cy693_setup_channel; 3439 3440 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3441 sc->sc_wdcdev.nchannels = 1; 3442 3443 /* Only one channel for this chip; if we are here it's enabled */ 3444 cp = &sc->pciide_channels[0]; 3445 sc->wdc_chanarray[0] = &cp->wdc_channel; 3446 cp->name = PCIIDE_CHANNEL_NAME(0); 3447 cp->wdc_channel.channel = 0; 3448 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3449 cp->wdc_channel.ch_queue = 3450 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 3451 if (cp->wdc_channel.ch_queue == NULL) { 3452 printf(": cannot allocate memory for command queue\n"); 3453 return; 3454 } 3455 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 3456 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 3457 "configured" : "wired"); 3458 if (interface & PCIIDE_INTERFACE_PCI(0)) { 3459 printf("native-PCI\n"); 3460 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 3461 pciide_pci_intr); 3462 } else { 3463 printf("compatibility\n"); 3464 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, 3465 &cmdsize, &ctlsize); 3466 } 3467 3468 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 3469 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 3470 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface); 3471 if (cp->hw_ok == 0) 3472 return; 3473 wdcattach(&cp->wdc_channel); 3474 if (pciide_chan_candisable(cp)) { 3475 pci_conf_write(sc->sc_pc, sc->sc_tag, 3476 PCI_COMMAND_STATUS_REG, 0); 3477 } 3478 if (cp->hw_ok == 0) { 3479 pciide_unmap_compat_intr(pa, cp, sc->sc_cy_compatchan, 3480 interface); 3481 return; 3482 } 3483 3484 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 3485 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE); 3486 cy693_setup_channel(&cp->wdc_channel); 3487 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 3488 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 3489 } 3490 3491 void 3492 cy693_setup_channel(chp) 3493 struct channel_softc *chp; 3494 { 3495 struct ata_drive_datas *drvp; 3496 int drive; 3497 u_int32_t cy_cmd_ctrl; 3498 u_int32_t idedma_ctl; 3499 struct pciide_channel *cp = (struct pciide_channel*)chp; 3500 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3501 int dma_mode = -1; 3502 3503 cy_cmd_ctrl = idedma_ctl = 0; 3504 3505 /* setup DMA if needed */ 3506 pciide_channel_dma_setup(cp); 3507 3508 for (drive = 0; drive < 2; drive++) { 3509 drvp = &chp->ch_drive[drive]; 3510 /* If no drive, skip */ 3511 if ((drvp->drive_flags & DRIVE) == 0) 3512 continue; 3513 /* add timing 
values, setup DMA if needed */ 3514 if (drvp->drive_flags & DRIVE_DMA) { 3515 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3516 /* use Multiword DMA */ 3517 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 3518 dma_mode = drvp->DMA_mode; 3519 } 3520 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 3521 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 3522 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 3523 CY_CMD_CTRL_IOW_REC_OFF(drive)); 3524 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 3525 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 3526 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 3527 CY_CMD_CTRL_IOR_REC_OFF(drive)); 3528 } 3529 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 3530 chp->ch_drive[0].DMA_mode = dma_mode; 3531 chp->ch_drive[1].DMA_mode = dma_mode; 3532 3533 if (dma_mode == -1) 3534 dma_mode = 0; 3535 3536 if (sc->sc_cy_handle != NULL) { 3537 /* Note: `multiple' is implied. */ 3538 cy82c693_write(sc->sc_cy_handle, 3539 (sc->sc_cy_compatchan == 0) ? 3540 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 3541 } 3542 3543 pciide_print_modes(cp); 3544 3545 if (idedma_ctl != 0) { 3546 /* Add software bits in status register */ 3547 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3548 IDEDMA_CTL(chp->channel), idedma_ctl); 3549 } 3550 } 3551 3552 static struct sis_hostbr_type { 3553 u_int16_t id; 3554 u_int8_t rev; 3555 u_int8_t udma_mode; 3556 char *name; 3557 u_int8_t type; 3558 #define SIS_TYPE_NOUDMA 0 3559 #define SIS_TYPE_66 1 3560 #define SIS_TYPE_100OLD 2 3561 #define SIS_TYPE_100NEW 3 3562 #define SIS_TYPE_133OLD 4 3563 #define SIS_TYPE_133NEW 5 3564 #define SIS_TYPE_SOUTH 6 3565 } sis_hostbr_type[] = { 3566 /* Most infos here are from sos@freebsd.org */ 3567 {PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66}, 3568 #if 0 3569 /* 3570 * controllers associated to a rev 0x2 530 Host to PCI Bridge 3571 * have problems with UDMA (info provided by Christos) 3572 */ 3573 {PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA}, 3574 #endif 3575 {PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66}, 3576 {PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66}, 3577 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66}, 3578 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66}, 3579 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW}, 3580 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW}, 3581 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW}, 3582 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH}, 3583 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH}, 3584 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH}, 3585 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH}, 3586 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH}, 3587 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH}, 3588 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH}, 3589 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH}, 3590 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH}, 3591 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD}, 3592 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW}, 3593 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW}, 3594 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH}, 3595 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW}, 3596 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH}, 3597 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH}, 3598 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH}, 3599 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH}, 3600 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH}, 3601 
{PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH}, 3602 /* 3603 * From sos@freebsd.org: the 0x961 ID will never be found in real world 3604 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW}, 3605 */ 3606 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW}, 3607 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW} 3608 }; 3609 3610 static struct sis_hostbr_type *sis_hostbr_type_match; 3611 3612 int 3613 sis_hostbr_match(struct pci_attach_args *pa) 3614 { 3615 int i; 3616 3617 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS) 3618 return (0); 3619 sis_hostbr_type_match = NULL; 3620 for (i = 0; 3621 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]); 3622 i++) { 3623 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id && 3624 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev) 3625 sis_hostbr_type_match = &sis_hostbr_type[i]; 3626 } 3627 return (sis_hostbr_type_match != NULL); 3628 } 3629 3630 int 3631 sis_south_match(struct pci_attach_args *pa) 3632 { 3633 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS && 3634 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 && 3635 PCI_REVISION(pa->pa_class) >= 0x10); 3636 } 3637 3638 void 3639 sis_chip_map(sc, pa) 3640 struct pciide_softc *sc; 3641 struct pci_attach_args *pa; 3642 { 3643 struct pciide_channel *cp; 3644 int channel; 3645 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 3646 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3647 pcireg_t rev = PCI_REVISION(pa->pa_class); 3648 bus_size_t cmdsize, ctlsize; 3649 pcitag_t br_tag; 3650 struct pci_attach_args br_pa; 3651 3652 if (pciide_chipen(sc, pa) == 0) 3653 return; 3654 3655 /* Find PCI bridge (dev 0 func 0 on the same bus) */ 3656 br_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 0, 0); 3657 br_pa.pa_id = pci_conf_read(sc->sc_pc, br_tag, PCI_ID_REG); 3658 br_pa.pa_class = pci_conf_read(sc->sc_pc, br_tag, PCI_CLASS_REG); 3659 WDCDEBUG_PRINT(("%s: PCI bridge pa_id=0x%x pa_class=0x%x\n", 3660 __func__, br_pa.pa_id, br_pa.pa_class), DEBUG_PROBE); 3661 3662 if (sis_hostbr_match(&br_pa)) { 3663 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) { 3664 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57, 3665 pciide_pci_read(sc->sc_pc, sc->sc_tag, 3666 SIS_REG_57) & 0x7f); 3667 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 3668 PCI_ID_REG)) == SIS_PRODUCT_5518) { 3669 sc->sis_type = SIS_TYPE_133NEW; 3670 sc->sc_wdcdev.UDMA_cap = 3671 sis_hostbr_type_match->udma_mode; 3672 } else { 3673 /* Find ISA bridge (func 0 of the same dev) */ 3674 br_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 3675 pa->pa_device, 0); 3676 br_pa.pa_id = pci_conf_read(sc->sc_pc, 3677 br_tag, PCI_ID_REG); 3678 br_pa.pa_class = pci_conf_read(sc->sc_pc, 3679 br_tag, PCI_CLASS_REG); 3680 WDCDEBUG_PRINT(("%s: ISA bridge " 3681 "pa_id=0x%x pa_class=0x%x\n", 3682 __func__, br_pa.pa_id, br_pa.pa_class), 3683 DEBUG_PROBE); 3684 3685 if (sis_south_match(&br_pa)) { 3686 sc->sis_type = SIS_TYPE_133OLD; 3687 sc->sc_wdcdev.UDMA_cap = 3688 sis_hostbr_type_match->udma_mode; 3689 } else { 3690 sc->sis_type = SIS_TYPE_100NEW; 3691 sc->sc_wdcdev.UDMA_cap = 3692 sis_hostbr_type_match->udma_mode; 3693 } 3694 } 3695 } else { 3696 sc->sis_type = sis_hostbr_type_match->type; 3697 sc->sc_wdcdev.UDMA_cap = 3698 sis_hostbr_type_match->udma_mode; 3699 } 3700 printf(": %s", sis_hostbr_type_match->name); 3701 } else { 3702 printf(": 5597/5598"); 3703 if (rev >= 0xd0) { 3704 sc->sc_wdcdev.UDMA_cap = 2; 3705 sc->sis_type = SIS_TYPE_66; 3706 } else { 3707 sc->sc_wdcdev.UDMA_cap = 0; 3708 sc->sis_type = SIS_TYPE_NOUDMA; 
3709 } 3710 } 3711 3712 printf(": DMA"); 3713 pciide_mapreg_dma(sc, pa); 3714 3715 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3716 WDC_CAPABILITY_MODE; 3717 if (sc->sc_dma_ok) { 3718 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3719 sc->sc_wdcdev.irqack = pciide_irqack; 3720 if (sc->sis_type >= SIS_TYPE_66) 3721 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3722 } 3723 3724 sc->sc_wdcdev.PIO_cap = 4; 3725 sc->sc_wdcdev.DMA_cap = 2; 3726 3727 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3728 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3729 switch (sc->sis_type) { 3730 case SIS_TYPE_NOUDMA: 3731 case SIS_TYPE_66: 3732 case SIS_TYPE_100OLD: 3733 sc->sc_wdcdev.set_modes = sis_setup_channel; 3734 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 3735 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 3736 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC); 3737 break; 3738 case SIS_TYPE_100NEW: 3739 case SIS_TYPE_133OLD: 3740 sc->sc_wdcdev.set_modes = sis_setup_channel; 3741 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49, 3742 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01); 3743 break; 3744 case SIS_TYPE_133NEW: 3745 sc->sc_wdcdev.set_modes = sis96x_setup_channel; 3746 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 3747 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 3748 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 3749 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 3750 break; 3751 } 3752 3753 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3754 3755 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3756 cp = &sc->pciide_channels[channel]; 3757 if (pciide_chansetup(sc, channel, interface) == 0) 3758 continue; 3759 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 3760 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 3761 printf("%s: %s ignored (disabled)\n", 3762 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3763 continue; 3764 } 3765 pciide_map_compat_intr(pa, cp, channel, interface); 3766 if (cp->hw_ok == 0) 3767 continue; 3768 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3769 pciide_pci_intr); 3770 if (cp->hw_ok == 0) { 3771 pciide_unmap_compat_intr(pa, cp, channel, interface); 3772 continue; 3773 } 3774 if (pciide_chan_candisable(cp)) { 3775 if (channel == 0) 3776 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 3777 else 3778 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 3779 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 3780 sis_ctr0); 3781 } 3782 if (cp->hw_ok == 0) { 3783 pciide_unmap_compat_intr(pa, cp, channel, interface); 3784 continue; 3785 } 3786 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 3787 } 3788 } 3789 3790 void 3791 sis96x_setup_channel(struct channel_softc *chp) 3792 { 3793 struct ata_drive_datas *drvp; 3794 int drive; 3795 u_int32_t sis_tim; 3796 u_int32_t idedma_ctl; 3797 int regtim; 3798 struct pciide_channel *cp = (struct pciide_channel*)chp; 3799 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3800 3801 sis_tim = 0; 3802 idedma_ctl = 0; 3803 /* setup DMA if needed */ 3804 pciide_channel_dma_setup(cp); 3805 3806 for (drive = 0; drive < 2; drive++) { 3807 regtim = SIS_TIM133( 3808 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 3809 chp->channel, drive); 3810 drvp = &chp->ch_drive[drive]; 3811 /* If no drive, skip */ 3812 if ((drvp->drive_flags & DRIVE) == 0) 3813 continue; 3814 /* add timing values, setup DMA if needed */ 3815 if (drvp->drive_flags & DRIVE_UDMA) { 3816 /* use Ultra/DMA */ 3817 drvp->drive_flags &= 
~DRIVE_DMA; 3818 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3819 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 3820 if (drvp->UDMA_mode > 2) 3821 drvp->UDMA_mode = 2; 3822 } 3823 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 3824 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 3825 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3826 } else if (drvp->drive_flags & DRIVE_DMA) { 3827 /* 3828 * use Multiword DMA 3829 * Timings will be used for both PIO and DMA, 3830 * so adjust DMA mode if needed 3831 */ 3832 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3833 drvp->PIO_mode = drvp->DMA_mode + 2; 3834 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3835 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 3836 drvp->PIO_mode - 2 : 0; 3837 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 3838 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3839 } else { 3840 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 3841 } 3842 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 3843 "channel %d drive %d: 0x%x (reg 0x%x)\n", 3844 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 3845 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 3846 } 3847 if (idedma_ctl != 0) { 3848 /* Add software bits in status register */ 3849 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3850 IDEDMA_CTL(chp->channel), idedma_ctl); 3851 } 3852 pciide_print_modes(cp); 3853 } 3854 3855 void 3856 sis_setup_channel(chp) 3857 struct channel_softc *chp; 3858 { 3859 struct ata_drive_datas *drvp; 3860 int drive; 3861 u_int32_t sis_tim; 3862 u_int32_t idedma_ctl; 3863 struct pciide_channel *cp = (struct pciide_channel*)chp; 3864 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3865 3866 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 3867 "channel %d 0x%x\n", chp->channel, 3868 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 3869 DEBUG_PROBE); 3870 sis_tim = 0; 3871 idedma_ctl = 0; 3872 /* setup DMA if needed */ 3873 pciide_channel_dma_setup(cp); 3874 3875 for (drive = 0; drive < 2; drive++) { 3876 drvp = &chp->ch_drive[drive]; 3877 /* If no drive, skip */ 3878 if ((drvp->drive_flags & DRIVE) == 0) 3879 continue; 3880 /* add timing values, setup DMA if needed */ 3881 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 3882 (drvp->drive_flags & DRIVE_UDMA) == 0) 3883 goto pio; 3884 3885 if (drvp->drive_flags & DRIVE_UDMA) { 3886 /* use Ultra/DMA */ 3887 drvp->drive_flags &= ~DRIVE_DMA; 3888 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3889 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 3890 if (drvp->UDMA_mode > 2) 3891 drvp->UDMA_mode = 2; 3892 } 3893 switch (sc->sis_type) { 3894 case SIS_TYPE_66: 3895 case SIS_TYPE_100OLD: 3896 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 3897 SIS_TIM66_UDMA_TIME_OFF(drive); 3898 break; 3899 case SIS_TYPE_100NEW: 3900 sis_tim |= 3901 sis_udma100new_tim[drvp->UDMA_mode] << 3902 SIS_TIM100_UDMA_TIME_OFF(drive); break; 3903 case SIS_TYPE_133OLD: 3904 sis_tim |= 3905 sis_udma133old_tim[drvp->UDMA_mode] << 3906 SIS_TIM100_UDMA_TIME_OFF(drive); 3907 break; 3908 default: 3909 printf("unknown SiS IDE type %d\n", 3910 sc->sis_type); 3911 } 3912 } else { 3913 /* 3914 * use Multiword DMA 3915 * Timings will be used for both PIO and DMA, 3916 * so adjust DMA mode if needed 3917 */ 3918 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3919 drvp->PIO_mode = drvp->DMA_mode + 2; 3920 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3921 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3922 drvp->PIO_mode - 2 : 0; 3923 if (drvp->DMA_mode == 0) 3924 drvp->PIO_mode = 0; 3925 } 3926 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3927 pio: switch (sc->sis_type) { 3928 case SIS_TYPE_NOUDMA: 3929 case SIS_TYPE_66: 3930 case SIS_TYPE_100OLD: 3931 sis_tim |= sis_pio_act[drvp->PIO_mode] << 3932 SIS_TIM66_ACT_OFF(drive); 3933 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 3934 SIS_TIM66_REC_OFF(drive); 3935 break; 3936 case SIS_TYPE_100NEW: 3937 case SIS_TYPE_133OLD: 3938 sis_tim |= sis_pio_act[drvp->PIO_mode] << 3939 SIS_TIM100_ACT_OFF(drive); 3940 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 3941 SIS_TIM100_REC_OFF(drive); 3942 break; 3943 default: 3944 printf("unknown SiS IDE type %d\n", 3945 sc->sis_type); 3946 } 3947 } 3948 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 3949 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 3950 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 3951 if (idedma_ctl != 0) { 3952 /* Add software bits in status register */ 3953 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3954 IDEDMA_CTL(chp->channel), idedma_ctl); 3955 } 3956 pciide_print_modes(cp); 3957 } 3958 3959 void 3960 natsemi_chip_map(sc, pa) 3961 struct pciide_softc *sc; 3962 struct pci_attach_args *pa; 3963 { 3964 struct pciide_channel *cp; 3965 int channel; 3966 pcireg_t interface, ctl; 3967 bus_size_t cmdsize, ctlsize; 3968 3969 if (pciide_chipen(sc, pa) == 0) 3970 return; 3971 3972 printf(": DMA"); 3973 pciide_mapreg_dma(sc, pa); 3974 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3975 3976 if (sc->sc_dma_ok) { 3977 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3978 sc->sc_wdcdev.irqack = natsemi_irqack; 3979 } 3980 3981 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 3982 3983 /* 3984 * Mask off interrupts from both channels, appropriate channel(s) 3985 * will be unmasked later. 3986 */ 3987 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 3988 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 3989 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 3990 3991 sc->sc_wdcdev.PIO_cap = 4; 3992 sc->sc_wdcdev.DMA_cap = 2; 3993 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 3994 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3995 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3996 3997 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 3998 PCI_CLASS_REG)); 3999 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 4000 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 4001 4002 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
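(Presumably because native-PCI channels deliver their interrupts through INTA, while compatibility channels use the legacy ISA IRQs; leaving INTA unmasked with no native channel would only invite spurious interrupts -- an assumption inferred from the masking logic below.)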
*/ 4003 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 4004 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 4005 ctl &= ~NATSEMI_CTRL1_INTAMASK; 4006 else 4007 ctl |= NATSEMI_CTRL1_INTAMASK; 4008 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 4009 4010 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4011 cp = &sc->pciide_channels[channel]; 4012 if (pciide_chansetup(sc, channel, interface) == 0) 4013 continue; 4014 4015 pciide_map_compat_intr(pa, cp, channel, interface); 4016 if (cp->hw_ok == 0) 4017 continue; 4018 4019 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4020 natsemi_pci_intr); 4021 if (cp->hw_ok == 0) { 4022 pciide_unmap_compat_intr(pa, cp, channel, interface); 4023 continue; 4024 } 4025 natsemi_setup_channel(&cp->wdc_channel); 4026 } 4027 } 4028 4029 void 4030 natsemi_setup_channel(chp) 4031 struct channel_softc *chp; 4032 { 4033 struct ata_drive_datas *drvp; 4034 int drive, ndrives = 0; 4035 u_int32_t idedma_ctl = 0; 4036 struct pciide_channel *cp = (struct pciide_channel*)chp; 4037 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4038 u_int8_t tim; 4039 4040 /* setup DMA if needed */ 4041 pciide_channel_dma_setup(cp); 4042 4043 for (drive = 0; drive < 2; drive++) { 4044 drvp = &chp->ch_drive[drive]; 4045 /* If no drive, skip */ 4046 if ((drvp->drive_flags & DRIVE) == 0) 4047 continue; 4048 4049 ndrives++; 4050 /* add timing values, setup DMA if needed */ 4051 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 4052 tim = natsemi_pio_pulse[drvp->PIO_mode] | 4053 (natsemi_pio_recover[drvp->PIO_mode] << 4); 4054 } else { 4055 /* 4056 * use Multiword DMA 4057 * Timings will be used for both PIO and DMA, 4058 * so adjust DMA mode if needed 4059 */ 4060 if (drvp->PIO_mode >= 3 && 4061 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 4062 drvp->DMA_mode = drvp->PIO_mode - 2; 4063 } 4064 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4065 tim = natsemi_dma_pulse[drvp->DMA_mode] | 4066 (natsemi_dma_recover[drvp->DMA_mode] << 4); 4067 } 4068 4069 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4070 NATSEMI_RTREG(chp->channel, drive), tim); 4071 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4072 NATSEMI_WTREG(chp->channel, drive), tim); 4073 } 4074 if (idedma_ctl != 0) { 4075 /* Add software bits in status register */ 4076 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4077 IDEDMA_CTL(chp->channel), idedma_ctl); 4078 } 4079 if (ndrives > 0) { 4080 /* Unmask the channel if at least one drive is found */ 4081 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 4082 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 4083 ~(NATSEMI_CHMASK(chp->channel))); 4084 } 4085 4086 pciide_print_modes(cp); 4087 4088 /* Go ahead and ack interrupts generated during probe. 
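(The interrupt and error bits in IDEDMA_CTL are write-one-to-clear, so writing back the value just read clears anything left pending.)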
*/ 4089 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4090 IDEDMA_CTL(chp->channel), 4091 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4092 IDEDMA_CTL(chp->channel))); 4093 } 4094 4095 void 4096 natsemi_irqack(chp) 4097 struct channel_softc *chp; 4098 { 4099 struct pciide_channel *cp = (struct pciide_channel*)chp; 4100 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4101 u_int8_t clr; 4102 4103 /* The "clear" bits are in the wrong register *sigh* */ 4104 clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4105 IDEDMA_CMD(chp->channel)); 4106 clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4107 IDEDMA_CTL(chp->channel)) & 4108 (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR); 4109 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4110 IDEDMA_CMD(chp->channel), clr); 4111 } 4112 4113 int 4114 natsemi_pci_intr(arg) 4115 void *arg; 4116 { 4117 struct pciide_softc *sc = arg; 4118 struct pciide_channel *cp; 4119 struct channel_softc *wdc_cp; 4120 int i, rv, crv; 4121 u_int8_t ide_dmactl, msk; 4122 4123 rv = 0; 4124 msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2); 4125 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4126 cp = &sc->pciide_channels[i]; 4127 wdc_cp = &cp->wdc_channel; 4128 4129 /* If a compat channel skip. */ 4130 if (cp->compat) 4131 continue; 4132 4133 /* If this channel is masked, skip it. */ 4134 if (msk & NATSEMI_CHMASK(i)) 4135 continue; 4136 4137 /* Get intr status */ 4138 ide_dmactl = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4139 IDEDMA_CTL(i)); 4140 4141 if (ide_dmactl & IDEDMA_CTL_ERR) 4142 printf("%s:%d: error intr\n", 4143 sc->sc_wdcdev.sc_dev.dv_xname, i); 4144 4145 if (ide_dmactl & IDEDMA_CTL_INTR) { 4146 crv = wdcintr(wdc_cp); 4147 if (crv == 0) 4148 printf("%s:%d: bogus intr\n", 4149 sc->sc_wdcdev.sc_dev.dv_xname, i); 4150 else 4151 rv = 1; 4152 } 4153 } 4154 return (rv); 4155 } 4156 4157 void 4158 acer_chip_map(sc, pa) 4159 struct pciide_softc *sc; 4160 struct pci_attach_args *pa; 4161 { 4162 struct pciide_channel *cp; 4163 int channel; 4164 pcireg_t cr, interface; 4165 bus_size_t cmdsize, ctlsize; 4166 pcireg_t rev = PCI_REVISION(pa->pa_class); 4167 4168 if (pciide_chipen(sc, pa) == 0) 4169 return; 4170 4171 printf(": DMA"); 4172 pciide_mapreg_dma(sc, pa); 4173 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4174 WDC_CAPABILITY_MODE; 4175 4176 if (sc->sc_dma_ok) { 4177 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 4178 if (rev >= 0x20) { 4179 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 4180 if (rev >= 0xC4) 4181 sc->sc_wdcdev.UDMA_cap = 5; 4182 else if (rev >= 0xC2) 4183 sc->sc_wdcdev.UDMA_cap = 4; 4184 else 4185 sc->sc_wdcdev.UDMA_cap = 2; 4186 } 4187 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4188 sc->sc_wdcdev.irqack = pciide_irqack; 4189 } 4190 4191 sc->sc_wdcdev.PIO_cap = 4; 4192 sc->sc_wdcdev.DMA_cap = 2; 4193 sc->sc_wdcdev.set_modes = acer_setup_channel; 4194 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4195 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4196 4197 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 4198 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 4199 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 4200 4201 /* Enable "microsoft register bits" R/W. 
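(That is, clear the read-only locks so that the programming-interface/channel-status bits in PCI_CLASS_REG, written just below, actually take effect.)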
*/ 4202 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 4203 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 4204 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 4205 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 4206 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 4207 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 4208 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 4209 ~ACER_CHANSTATUSREGS_RO); 4210 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 4211 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 4212 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 4213 /* Don't use cr, re-read the real register content instead */ 4214 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 4215 PCI_CLASS_REG)); 4216 4217 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 4218 4219 /* From linux: enable "Cable Detection" */ 4220 if (rev >= 0xC2) 4221 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 4222 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 4223 | ACER_0x4B_CDETECT); 4224 4225 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4226 cp = &sc->pciide_channels[channel]; 4227 if (pciide_chansetup(sc, channel, interface) == 0) 4228 continue; 4229 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 4230 printf("%s: %s ignored (disabled)\n", 4231 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4232 continue; 4233 } 4234 pciide_map_compat_intr(pa, cp, channel, interface); 4235 if (cp->hw_ok == 0) 4236 continue; 4237 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4238 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr); 4239 if (cp->hw_ok == 0) { 4240 pciide_unmap_compat_intr(pa, cp, channel, interface); 4241 continue; 4242 } 4243 if (pciide_chan_candisable(cp)) { 4244 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 4245 pci_conf_write(sc->sc_pc, sc->sc_tag, 4246 PCI_CLASS_REG, cr); 4247 } 4248 if (cp->hw_ok == 0) { 4249 pciide_unmap_compat_intr(pa, cp, channel, interface); 4250 continue; 4251 } 4252 acer_setup_channel(&cp->wdc_channel); 4253 } 4254 } 4255 4256 void 4257 acer_setup_channel(chp) 4258 struct channel_softc *chp; 4259 { 4260 struct ata_drive_datas *drvp; 4261 int drive; 4262 u_int32_t acer_fifo_udma; 4263 u_int32_t idedma_ctl; 4264 struct pciide_channel *cp = (struct pciide_channel*)chp; 4265 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4266 4267 idedma_ctl = 0; 4268 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 4269 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 4270 acer_fifo_udma), DEBUG_PROBE); 4271 /* setup DMA if needed */ 4272 pciide_channel_dma_setup(cp); 4273 4274 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 4275 DRIVE_UDMA) { /* check 80 pins cable */ 4276 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 4277 ACER_0x4A_80PIN(chp->channel)) { 4278 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 4279 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 4280 DEBUG_PROBE); 4281 if (chp->ch_drive[0].UDMA_mode > 2) 4282 chp->ch_drive[0].UDMA_mode = 2; 4283 if (chp->ch_drive[1].UDMA_mode > 2) 4284 chp->ch_drive[1].UDMA_mode = 2; 4285 } 4286 } 4287 4288 for (drive = 0; drive < 2; drive++) { 4289 drvp = &chp->ch_drive[drive]; 4290 /* If no drive, skip */ 4291 if ((drvp->drive_flags & DRIVE) == 0) 4292 continue; 4293 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 4294 "channel %d drive %d 0x%x\n", chp->channel, drive, 4295 pciide_pci_read(sc->sc_pc, sc->sc_tag, 
4296 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 4297 /* clear FIFO/DMA mode */ 4298 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 4299 ACER_UDMA_EN(chp->channel, drive) | 4300 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 4301 4302 /* add timing values, setup DMA if needed */ 4303 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 4304 (drvp->drive_flags & DRIVE_UDMA) == 0) { 4305 acer_fifo_udma |= 4306 ACER_FTH_OPL(chp->channel, drive, 0x1); 4307 goto pio; 4308 } 4309 4310 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 4311 if (drvp->drive_flags & DRIVE_UDMA) { 4312 /* use Ultra/DMA */ 4313 drvp->drive_flags &= ~DRIVE_DMA; 4314 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 4315 acer_fifo_udma |= 4316 ACER_UDMA_TIM(chp->channel, drive, 4317 acer_udma[drvp->UDMA_mode]); 4318 /* XXX disable if one drive < UDMA3 ? */ 4319 if (drvp->UDMA_mode >= 3) { 4320 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4321 ACER_0x4B, 4322 pciide_pci_read(sc->sc_pc, sc->sc_tag, 4323 ACER_0x4B) | ACER_0x4B_UDMA66); 4324 } 4325 } else { 4326 /* 4327 * use Multiword DMA 4328 * Timings will be used for both PIO and DMA, 4329 * so adjust DMA mode if needed 4330 */ 4331 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 4332 drvp->PIO_mode = drvp->DMA_mode + 2; 4333 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 4334 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 4335 drvp->PIO_mode - 2 : 0; 4336 if (drvp->DMA_mode == 0) 4337 drvp->PIO_mode = 0; 4338 } 4339 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4340 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 4341 ACER_IDETIM(chp->channel, drive), 4342 acer_pio[drvp->PIO_mode]); 4343 } 4344 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 4345 acer_fifo_udma), DEBUG_PROBE); 4346 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 4347 if (idedma_ctl != 0) { 4348 /* Add software bits in status register */ 4349 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4350 IDEDMA_CTL(chp->channel), idedma_ctl); 4351 } 4352 pciide_print_modes(cp); 4353 } 4354 4355 int 4356 acer_pci_intr(arg) 4357 void *arg; 4358 { 4359 struct pciide_softc *sc = arg; 4360 struct pciide_channel *cp; 4361 struct channel_softc *wdc_cp; 4362 int i, rv, crv; 4363 u_int32_t chids; 4364 4365 rv = 0; 4366 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 4367 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4368 cp = &sc->pciide_channels[i]; 4369 wdc_cp = &cp->wdc_channel; 4370 /* If a compat channel skip. */ 4371 if (cp->compat) 4372 continue; 4373 if (chids & ACER_CHIDS_INT(i)) { 4374 crv = wdcintr(wdc_cp); 4375 if (crv == 0) 4376 printf("%s:%d: bogus intr\n", 4377 sc->sc_wdcdev.sc_dev.dv_xname, i); 4378 else 4379 rv = 1; 4380 } 4381 } 4382 return rv; 4383 } 4384 4385 void 4386 hpt_chip_map(sc, pa) 4387 struct pciide_softc *sc; 4388 struct pci_attach_args *pa; 4389 { 4390 struct pciide_channel *cp; 4391 int i, compatchan, revision; 4392 pcireg_t interface; 4393 bus_size_t cmdsize, ctlsize; 4394 4395 if (pciide_chipen(sc, pa) == 0) 4396 return; 4397 revision = sc->sc_rev = PCI_REVISION(pa->pa_class); 4398 4399 /* 4400 * when the chip is in native mode it identifies itself as a 4401 * 'misc mass storage'. Fake interface in this case. 
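 * The faked value claims bus-master DMA with channel 0 in native-PCI mode, plus channel 1 on the chips that put both channels behind a single PCI function.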
4402 */ 4403 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4404 interface = PCI_INTERFACE(pa->pa_class); 4405 } else { 4406 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4407 PCIIDE_INTERFACE_PCI(0); 4408 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 4409 (revision == HPT370_REV || revision == HPT370A_REV || 4410 revision == HPT372_REV)) || 4411 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 4412 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 4413 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 4414 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 4415 interface |= PCIIDE_INTERFACE_PCI(1); 4416 } 4417 4418 printf(": DMA"); 4419 pciide_mapreg_dma(sc, pa); 4420 printf("\n"); 4421 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4422 WDC_CAPABILITY_MODE; 4423 if (sc->sc_dma_ok) { 4424 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4425 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4426 sc->sc_wdcdev.irqack = pciide_irqack; 4427 } 4428 sc->sc_wdcdev.PIO_cap = 4; 4429 sc->sc_wdcdev.DMA_cap = 2; 4430 4431 sc->sc_wdcdev.set_modes = hpt_setup_channel; 4432 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4433 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 4434 revision == HPT366_REV) { 4435 sc->sc_wdcdev.UDMA_cap = 4; 4436 /* 4437 * The 366 has 2 PCI IDE functions, one for primary and one 4438 * for secondary. So we need to call pciide_mapregs_compat() 4439 * with the real channel 4440 */ 4441 if (pa->pa_function == 0) { 4442 compatchan = 0; 4443 } else if (pa->pa_function == 1) { 4444 compatchan = 1; 4445 } else { 4446 printf("%s: unexpected PCI function %d\n", 4447 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 4448 return; 4449 } 4450 sc->sc_wdcdev.nchannels = 1; 4451 } else { 4452 sc->sc_wdcdev.nchannels = 2; 4453 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 4454 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 4455 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 4456 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 4457 sc->sc_wdcdev.UDMA_cap = 6; 4458 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 4459 if (revision == HPT372_REV) 4460 sc->sc_wdcdev.UDMA_cap = 6; 4461 else 4462 sc->sc_wdcdev.UDMA_cap = 5; 4463 } 4464 } 4465 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4466 cp = &sc->pciide_channels[i]; 4467 if (sc->sc_wdcdev.nchannels > 1) { 4468 compatchan = i; 4469 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 4470 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 4471 printf("%s: %s ignored (disabled)\n", 4472 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4473 continue; 4474 } 4475 } 4476 if (pciide_chansetup(sc, i, interface) == 0) 4477 continue; 4478 if (interface & PCIIDE_INTERFACE_PCI(i)) { 4479 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 4480 &ctlsize, hpt_pci_intr); 4481 } else { 4482 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 4483 &cmdsize, &ctlsize); 4484 } 4485 if (cp->hw_ok == 0) 4486 return; 4487 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4488 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4489 wdcattach(&cp->wdc_channel); 4490 hpt_setup_channel(&cp->wdc_channel); 4491 } 4492 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 4493 (revision == HPT370_REV || revision == HPT370A_REV || 4494 revision == HPT372_REV)) || 4495 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 4496 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 4497 sc->sc_pp->ide_product == 
PCI_PRODUCT_TRIONES_HPT371 || 4498 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 4499 /* 4500 * Turn off fast interrupts 4501 */ 4502 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0), 4503 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) & 4504 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 4505 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1), 4506 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) & 4507 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 4508 4509 /* 4510 * HPT370 and higher have a bit to disable interrupts, 4511 * make sure to clear it 4512 */ 4513 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 4514 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 4515 ~HPT_CSEL_IRQDIS); 4516 } 4517 /* set clocks, etc (mandatory on 372/4, optional otherwise) */ 4518 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 4519 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 4520 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 4521 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 || 4522 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 4523 revision == HPT372_REV)) 4524 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 4525 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 4526 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 4527 4528 return; 4529 } 4530 4531 void 4532 hpt_setup_channel(chp) 4533 struct channel_softc *chp; 4534 { 4535 struct ata_drive_datas *drvp; 4536 int drive; 4537 int cable; 4538 u_int32_t before, after; 4539 u_int32_t idedma_ctl; 4540 struct pciide_channel *cp = (struct pciide_channel*)chp; 4541 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4542 int revision = sc->sc_rev; 4543 u_int32_t *tim_pio, *tim_dma, *tim_udma; 4544 4545 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 4546 4547 /* setup DMA if needed */ 4548 pciide_channel_dma_setup(cp); 4549 4550 idedma_ctl = 0; 4551 4552 switch (sc->sc_pp->ide_product) { 4553 case PCI_PRODUCT_TRIONES_HPT366: 4554 if (revision == HPT370_REV || 4555 revision == HPT370A_REV) { 4556 tim_pio = hpt370_pio; 4557 tim_dma = hpt370_dma; 4558 tim_udma = hpt370_udma; 4559 } else if (revision == HPT372_REV) { 4560 tim_pio = hpt372_pio; 4561 tim_dma = hpt372_dma; 4562 tim_udma = hpt372_udma; 4563 } else { 4564 tim_pio = hpt366_pio; 4565 tim_dma = hpt366_dma; 4566 tim_udma = hpt366_udma; 4567 } 4568 break; 4569 case PCI_PRODUCT_TRIONES_HPT372A: 4570 case PCI_PRODUCT_TRIONES_HPT302: 4571 case PCI_PRODUCT_TRIONES_HPT371: 4572 tim_pio = hpt372_pio; 4573 tim_dma = hpt372_dma; 4574 tim_udma = hpt372_udma; 4575 break; 4576 case PCI_PRODUCT_TRIONES_HPT374: 4577 tim_pio = hpt374_pio; 4578 tim_dma = hpt374_dma; 4579 tim_udma = hpt374_udma; 4580 break; 4581 default: 4582 printf("%s: no known timing values\n", 4583 sc->sc_wdcdev.sc_dev.dv_xname); 4584 goto end; 4585 } 4586 4587 /* Per drive settings */ 4588 for (drive = 0; drive < 2; drive++) { 4589 drvp = &chp->ch_drive[drive]; 4590 /* If no drive, skip */ 4591 if ((drvp->drive_flags & DRIVE) == 0) 4592 continue; 4593 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 4594 HPT_IDETIM(chp->channel, drive)); 4595 4596 /* add timing values, setup DMA if needed */ 4597 if (drvp->drive_flags & DRIVE_UDMA) { 4598 /* use Ultra/DMA */ 4599 drvp->drive_flags &= ~DRIVE_DMA; 4600 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 4601 drvp->UDMA_mode > 2) { 4602 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 4603 "cable not detected\n", drvp->drive_name, 4604 sc->sc_wdcdev.sc_dev.dv_xname, 4605 chp->channel, drive), DEBUG_PROBE); 4606
drvp->UDMA_mode = 2; 4607 } 4608 after = tim_udma[drvp->UDMA_mode]; 4609 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4610 } else if (drvp->drive_flags & DRIVE_DMA) { 4611 /* 4612 * use Multiword DMA. 4613 * Timings will be used for both PIO and DMA, so adjust 4614 * DMA mode if needed 4615 */ 4616 if (drvp->PIO_mode >= 3 && 4617 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 4618 drvp->DMA_mode = drvp->PIO_mode - 2; 4619 } 4620 after = tim_dma[drvp->DMA_mode]; 4621 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4622 } else { 4623 /* PIO only */ 4624 after = tim_pio[drvp->PIO_mode]; 4625 } 4626 pci_conf_write(sc->sc_pc, sc->sc_tag, 4627 HPT_IDETIM(chp->channel, drive), after); 4628 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 4629 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 4630 after, before), DEBUG_PROBE); 4631 } 4632 end: 4633 if (idedma_ctl != 0) { 4634 /* Add software bits in status register */ 4635 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4636 IDEDMA_CTL(chp->channel), idedma_ctl); 4637 } 4638 pciide_print_modes(cp); 4639 } 4640 4641 int 4642 hpt_pci_intr(arg) 4643 void *arg; 4644 { 4645 struct pciide_softc *sc = arg; 4646 struct pciide_channel *cp; 4647 struct channel_softc *wdc_cp; 4648 int rv = 0; 4649 int dmastat, i, crv; 4650 4651 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4652 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4653 IDEDMA_CTL(i)); 4654 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 4655 IDEDMA_CTL_INTR) 4656 continue; 4657 cp = &sc->pciide_channels[i]; 4658 wdc_cp = &cp->wdc_channel; 4659 crv = wdcintr(wdc_cp); 4660 if (crv == 0) { 4661 printf("%s:%d: bogus intr\n", 4662 sc->sc_wdcdev.sc_dev.dv_xname, i); 4663 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4664 IDEDMA_CTL(i), dmastat); 4665 } else 4666 rv = 1; 4667 } 4668 return rv; 4669 } 4670 4671 /* Macros to test product */ 4672 #define PDC_IS_262(sc) \ 4673 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 4674 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 4675 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 4676 #define PDC_IS_265(sc) \ 4677 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 4678 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 4679 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 4680 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 4681 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 4682 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 4683 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 4684 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 4685 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277 || \ 4686 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20376) 4687 #define PDC_IS_268(sc) \ 4688 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 4689 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 4690 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 4691 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 4692 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 4693 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 4694 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277 || \ 4695 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20376) 4696 #define PDC_IS_269(sc) \ 4697 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 4698 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 4699 (sc)->sc_pp->ide_product 
== PCI_PRODUCT_PROMISE_PDC20275 || \ 4700 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 4701 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277 || \ 4702 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20376) 4703 4704 static __inline u_int8_t 4705 pdc268_config_read(struct channel_softc *chp, int index) 4706 { 4707 struct pciide_channel *cp = (struct pciide_channel *)chp; 4708 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4709 int channel = chp->channel; 4710 4711 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4712 PDC268_INDEX(channel), index); 4713 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4714 PDC268_DATA(channel))); 4715 } 4716 4717 static __inline void 4718 pdc268_config_write(struct channel_softc *chp, int index, u_int8_t value) 4719 { 4720 struct pciide_channel *cp = (struct pciide_channel *)chp; 4721 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4722 int channel = chp->channel; 4723 4724 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4725 PDC268_INDEX(channel), index); 4726 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4727 PDC268_DATA(channel), value); 4728 } 4729 4730 void 4731 pdc202xx_chip_map(sc, pa) 4732 struct pciide_softc *sc; 4733 struct pci_attach_args *pa; 4734 { 4735 struct pciide_channel *cp; 4736 int channel; 4737 pcireg_t interface, st, mode; 4738 bus_size_t cmdsize, ctlsize; 4739 4740 if (!PDC_IS_268(sc)) { 4741 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 4742 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 4743 st), DEBUG_PROBE); 4744 } 4745 if (pciide_chipen(sc, pa) == 0) 4746 return; 4747 4748 /* turn off RAID mode */ 4749 if (!PDC_IS_268(sc)) 4750 st &= ~PDC2xx_STATE_IDERAID; 4751 4752 /* 4753 * can't rely on the PCI_CLASS_REG content if the chip was in raid 4754 * mode. We have to fake interface 4755 */ 4756 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 4757 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 4758 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4759 4760 printf(": DMA"); 4761 pciide_mapreg_dma(sc, pa); 4762 4763 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4764 WDC_CAPABILITY_MODE; 4765 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 4766 PDC_IS_262(sc)) 4767 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 4768 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20376) 4769 sc->sc_wdcdev.cap |= WDC_CAPABILITY_SATA; 4770 if (sc->sc_dma_ok) { 4771 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4772 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4773 sc->sc_wdcdev.irqack = pciide_irqack; 4774 } 4775 sc->sc_wdcdev.PIO_cap = 4; 4776 sc->sc_wdcdev.DMA_cap = 2; 4777 if (PDC_IS_269(sc)) 4778 sc->sc_wdcdev.UDMA_cap = 6; 4779 else if (PDC_IS_265(sc)) 4780 sc->sc_wdcdev.UDMA_cap = 5; 4781 else if (PDC_IS_262(sc)) 4782 sc->sc_wdcdev.UDMA_cap = 4; 4783 else 4784 sc->sc_wdcdev.UDMA_cap = 2; 4785 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
4786 pdc20268_setup_channel : pdc202xx_setup_channel; 4787 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4788 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4789 4790 if (PDC_IS_262(sc)) { 4791 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 4792 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 4793 } 4794 4795 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 4796 if (!PDC_IS_268(sc)) { 4797 /* setup failsafe defaults */ 4798 mode = 0; 4799 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 4800 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 4801 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 4802 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 4803 for (channel = 0; 4804 channel < sc->sc_wdcdev.nchannels; 4805 channel++) { 4806 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 4807 "drive 0 initial timings 0x%x, now 0x%x\n", 4808 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 4809 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 4810 DEBUG_PROBE); 4811 pci_conf_write(sc->sc_pc, sc->sc_tag, 4812 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 4813 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 4814 "drive 1 initial timings 0x%x, now 0x%x\n", 4815 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 4816 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 4817 pci_conf_write(sc->sc_pc, sc->sc_tag, 4818 PDC2xx_TIM(channel, 1), mode); 4819 } 4820 4821 mode = PDC2xx_SCR_DMA; 4822 if (PDC_IS_262(sc)) { 4823 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 4824 } else { 4825 /* the BIOS set it up this way */ 4826 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 4827 } 4828 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 4829 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 4830 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 4831 "now 0x%x\n", 4832 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 4833 PDC2xx_SCR), 4834 mode), DEBUG_PROBE); 4835 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 4836 PDC2xx_SCR, mode); 4837 4838 /* controller initial state register is OK even without BIOS */ 4839 /* Set DMA mode to IDE DMA compatibility */ 4840 mode = 4841 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 4842 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 4843 DEBUG_PROBE); 4844 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 4845 mode | 0x1); 4846 mode = 4847 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 4848 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 4849 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 4850 mode | 0x1); 4851 } 4852 4853 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4854 cp = &sc->pciide_channels[channel]; 4855 if (pciide_chansetup(sc, channel, interface) == 0) 4856 continue; 4857 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 4858 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 4859 printf("%s: %s ignored (disabled)\n", 4860 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4861 continue; 4862 } 4863 pciide_map_compat_intr(pa, cp, channel, interface); 4864 if (cp->hw_ok == 0) 4865 continue; 4866 if (PDC_IS_265(sc)) 4867 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4868 pdc20265_pci_intr); 4869 else 4870 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4871 pdc202xx_pci_intr); 4872 if (cp->hw_ok == 0) { 4873 pciide_unmap_compat_intr(pa, cp, channel, interface); 4874 continue; 4875 } 4876 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 4877 st &= ~(PDC_IS_262(sc) ? 
4878 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 4879 pciide_unmap_compat_intr(pa, cp, channel, interface); 4880 } 4881 if (PDC_IS_268(sc)) 4882 pdc20268_setup_channel(&cp->wdc_channel); 4883 else 4884 pdc202xx_setup_channel(&cp->wdc_channel); 4885 } 4886 if (!PDC_IS_268(sc)) { 4887 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 4888 "0x%x\n", st), DEBUG_PROBE); 4889 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 4890 } 4891 return; 4892 } 4893 4894 void 4895 pdc202xx_setup_channel(chp) 4896 struct channel_softc *chp; 4897 { 4898 struct ata_drive_datas *drvp; 4899 int drive; 4900 pcireg_t mode, st; 4901 u_int32_t idedma_ctl, scr, atapi; 4902 struct pciide_channel *cp = (struct pciide_channel*)chp; 4903 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4904 int channel = chp->channel; 4905 4906 /* setup DMA if needed */ 4907 pciide_channel_dma_setup(cp); 4908 4909 idedma_ctl = 0; 4910 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 4911 sc->sc_wdcdev.sc_dev.dv_xname, 4912 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 4913 DEBUG_PROBE); 4914 4915 /* Per channel settings */ 4916 if (PDC_IS_262(sc)) { 4917 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4918 PDC262_U66); 4919 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 4920 /* Check cable */ 4921 if ((st & PDC262_STATE_80P(channel)) != 0 && 4922 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 4923 chp->ch_drive[0].UDMA_mode > 2) || 4924 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 4925 chp->ch_drive[1].UDMA_mode > 2))) { 4926 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 4927 sc->sc_wdcdev.sc_dev.dv_xname, channel), 4928 DEBUG_PROBE); 4929 if (chp->ch_drive[0].UDMA_mode > 2) 4930 chp->ch_drive[0].UDMA_mode = 2; 4931 if (chp->ch_drive[1].UDMA_mode > 2) 4932 chp->ch_drive[1].UDMA_mode = 2; 4933 } 4934 /* Trim UDMA mode */ 4935 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 4936 chp->ch_drive[0].UDMA_mode <= 2) || 4937 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 4938 chp->ch_drive[1].UDMA_mode <= 2)) { 4939 if (chp->ch_drive[0].UDMA_mode > 2) 4940 chp->ch_drive[0].UDMA_mode = 2; 4941 if (chp->ch_drive[1].UDMA_mode > 2) 4942 chp->ch_drive[1].UDMA_mode = 2; 4943 } 4944 /* Set U66 if needed */ 4945 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 4946 chp->ch_drive[0].UDMA_mode > 2) || 4947 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 4948 chp->ch_drive[1].UDMA_mode > 2)) 4949 scr |= PDC262_U66_EN(channel); 4950 else 4951 scr &= ~PDC262_U66_EN(channel); 4952 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4953 PDC262_U66, scr); 4954 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 4955 sc->sc_wdcdev.sc_dev.dv_xname, channel, 4956 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 4957 PDC262_ATAPI(channel))), DEBUG_PROBE); 4958 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 4959 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 4960 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 4961 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 4962 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 4963 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 4964 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 4965 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 4966 atapi = 0; 4967 else 4968 atapi = PDC262_ATAPI_UDMA; 4969 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 4970 PDC262_ATAPI(channel), atapi); 4971 } 4972 } 4973 for (drive = 0; drive < 2; drive++) { 4974 drvp = &chp->ch_drive[drive]; 4975 /* If no drive, skip */ 4976 if 
((drvp->drive_flags & DRIVE) == 0) 4977 continue; 4978 mode = 0; 4979 if (drvp->drive_flags & DRIVE_UDMA) { 4980 /* use Ultra/DMA */ 4981 drvp->drive_flags &= ~DRIVE_DMA; 4982 mode = PDC2xx_TIM_SET_MB(mode, 4983 pdc2xx_udma_mb[drvp->UDMA_mode]); 4984 mode = PDC2xx_TIM_SET_MC(mode, 4985 pdc2xx_udma_mc[drvp->UDMA_mode]); 4986 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4987 } else if (drvp->drive_flags & DRIVE_DMA) { 4988 mode = PDC2xx_TIM_SET_MB(mode, 4989 pdc2xx_dma_mb[drvp->DMA_mode]); 4990 mode = PDC2xx_TIM_SET_MC(mode, 4991 pdc2xx_dma_mc[drvp->DMA_mode]); 4992 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4993 } else { 4994 mode = PDC2xx_TIM_SET_MB(mode, 4995 pdc2xx_dma_mb[0]); 4996 mode = PDC2xx_TIM_SET_MC(mode, 4997 pdc2xx_dma_mc[0]); 4998 } 4999 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 5000 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 5001 if (drvp->drive_flags & DRIVE_ATA) 5002 mode |= PDC2xx_TIM_PRE; 5003 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 5004 if (drvp->PIO_mode >= 3) { 5005 mode |= PDC2xx_TIM_IORDY; 5006 if (drive == 0) 5007 mode |= PDC2xx_TIM_IORDYp; 5008 } 5009 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 5010 "timings 0x%x\n", 5011 sc->sc_wdcdev.sc_dev.dv_xname, 5012 chp->channel, drive, mode), DEBUG_PROBE); 5013 pci_conf_write(sc->sc_pc, sc->sc_tag, 5014 PDC2xx_TIM(chp->channel, drive), mode); 5015 } 5016 if (idedma_ctl != 0) { 5017 /* Add software bits in status register */ 5018 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5019 IDEDMA_CTL(channel), idedma_ctl); 5020 } 5021 pciide_print_modes(cp); 5022 } 5023 5024 void 5025 pdc20268_setup_channel(chp) 5026 struct channel_softc *chp; 5027 { 5028 struct ata_drive_datas *drvp; 5029 int drive, cable; 5030 u_int32_t idedma_ctl; 5031 struct pciide_channel *cp = (struct pciide_channel*)chp; 5032 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5033 int channel = chp->channel; 5034 5035 /* check for 80-pin cable */ 5036 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE; 5037 5038 /* setup DMA if needed */ 5039 pciide_channel_dma_setup(cp); 5040 5041 idedma_ctl = 0; 5042 5043 for (drive = 0; drive < 2; drive++) { 5044 drvp = &chp->ch_drive[drive]; 5045 /* If no drive, skip */ 5046 if ((drvp->drive_flags & DRIVE) == 0) 5047 continue; 5048 if (drvp->drive_flags & DRIVE_UDMA) { 5049 /* use Ultra/DMA */ 5050 drvp->drive_flags &= ~DRIVE_DMA; 5051 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5052 if (cable && drvp->UDMA_mode > 2) { 5053 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 5054 "cable not detected\n", drvp->drive_name, 5055 sc->sc_wdcdev.sc_dev.dv_xname, 5056 channel, drive), DEBUG_PROBE); 5057 drvp->UDMA_mode = 2; 5058 } 5059 } else if (drvp->drive_flags & DRIVE_DMA) { 5060 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5061 } 5062 } 5063 /* nothing to do to set up modes, the controller snoops the SET_FEATURES cmd */ 5064 if (idedma_ctl != 0) { 5065 /* Add software bits in status register */ 5066 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5067 IDEDMA_CTL(channel), idedma_ctl); 5068 } 5069 pciide_print_modes(cp); 5070 } 5071 5072 int 5073 pdc202xx_pci_intr(arg) 5074 void *arg; 5075 { 5076 struct pciide_softc *sc = arg; 5077 struct pciide_channel *cp; 5078 struct channel_softc *wdc_cp; 5079 int i, rv, crv; 5080 u_int32_t scr; 5081 5082 rv = 0; 5083 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 5084 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5085 cp = &sc->pciide_channels[i]; 5086 wdc_cp = &cp->wdc_channel; 5087 /* If a compat channel skip.
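(compat channels are serviced through the legacy interrupt vector set up for them, not through this shared PCI handler)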
*/ 5088 if (cp->compat) 5089 continue; 5090 if (scr & PDC2xx_SCR_INT(i)) { 5091 crv = wdcintr(wdc_cp); 5092 if (crv == 0) 5093 printf("%s:%d: bogus intr (reg 0x%x)\n", 5094 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 5095 else 5096 rv = 1; 5097 } 5098 } 5099 return rv; 5100 } 5101 5102 int 5103 pdc20265_pci_intr(arg) 5104 void *arg; 5105 { 5106 struct pciide_softc *sc = arg; 5107 struct pciide_channel *cp; 5108 struct channel_softc *wdc_cp; 5109 int i, rv, crv; 5110 u_int32_t dmastat; 5111 5112 rv = 0; 5113 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5114 cp = &sc->pciide_channels[i]; 5115 wdc_cp = &cp->wdc_channel; 5116 /* If a compat channel skip. */ 5117 if (cp->compat) 5118 continue; 5119 5120 /* 5121 * In case of shared IRQ check that the interrupt 5122 * was actually generated by this channel. 5123 * Only check the channel that is enabled. 5124 */ 5125 if (cp->hw_ok && PDC_IS_268(sc)) { 5126 if ((pdc268_config_read(wdc_cp, 5127 0x0b) & PDC268_INTR) == 0) 5128 continue; 5129 } 5130 5131 /* 5132 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 5133 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 5134 * So use it instead (requires 2 reg reads instead of 1, 5135 * but we can't do it another way). 5136 */ 5137 dmastat = bus_space_read_1(sc->sc_dma_iot, 5138 sc->sc_dma_ioh, IDEDMA_CTL(i)); 5139 if ((dmastat & IDEDMA_CTL_INTR) == 0) 5140 continue; 5141 5142 crv = wdcintr(wdc_cp); 5143 if (crv == 0) 5144 printf("%s:%d: bogus intr\n", 5145 sc->sc_wdcdev.sc_dev.dv_xname, i); 5146 else 5147 rv = 1; 5148 } 5149 return rv; 5150 } 5151 5152 void 5153 pdc20262_dma_start(void *v, int channel, int drive) 5154 { 5155 struct pciide_softc *sc = v; 5156 struct pciide_dma_maps *dma_maps = 5157 &sc->pciide_channels[channel].dma_maps[drive]; 5158 u_int8_t clock; 5159 u_int32_t count; 5160 5161 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 5162 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5163 PDC262_U66); 5164 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5165 PDC262_U66, clock | PDC262_U66_EN(channel)); 5166 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 5167 count |= dma_maps->dma_flags & WDC_DMA_READ ? 5168 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 5169 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 5170 PDC262_ATAPI(channel), count); 5171 } 5172 5173 pciide_dma_start(v, channel, drive); 5174 } 5175 5176 int 5177 pdc20262_dma_finish(void *v, int channel, int drive, int force) 5178 { 5179 struct pciide_softc *sc = v; 5180 struct pciide_dma_maps *dma_maps = 5181 &sc->pciide_channels[channel].dma_maps[drive]; 5182 u_int8_t clock; 5183 5184 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 5185 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5186 PDC262_U66); 5187 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5188 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 5189 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 5190 PDC262_ATAPI(channel), 0); 5191 } 5192 5193 return (pciide_dma_finish(v, channel, drive, force)); 5194 } 5195 5196 /* 5197 * Inline functions for accessing the timing registers of the 5198 * OPTi controller. 5199 * 5200 * These *MUST* disable interrupts as they need atomic access to 5201 * certain magic registers. Failure to adhere to this *will* 5202 * break things in subtle ways if the wdc registers are accessed 5203 * by an interrupt routine while this magic sequence is executing. 
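 * The unlock sequence used below is: two dummy 16-bit reads of the features register, an 8-bit write of 0x03 to the sector count register to expose the configuration registers, the actual register access, then a write of 0x83 to switch the normal task-file registers back in.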
5204 */ 5205 static __inline__ u_int8_t 5206 opti_read_config(struct channel_softc *chp, int reg) 5207 { 5208 u_int8_t rv; 5209 int s = splhigh(); 5210 5211 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 5212 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 5213 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 5214 5215 /* Followed by an 8-bit write of 0x3 to register #2 */ 5216 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 5217 5218 /* Now we can read the required register */ 5219 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 5220 5221 /* Restore the real registers */ 5222 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 5223 5224 splx(s); 5225 5226 return rv; 5227 } 5228 5229 static __inline__ void 5230 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 5231 { 5232 int s = splhigh(); 5233 5234 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 5235 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 5236 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 5237 5238 /* Followed by an 8-bit write of 0x3 to register #2 */ 5239 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 5240 5241 /* Now we can write the required register */ 5242 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 5243 5244 /* Restore the real registers */ 5245 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 5246 5247 splx(s); 5248 } 5249 5250 void 5251 opti_chip_map(sc, pa) 5252 struct pciide_softc *sc; 5253 struct pci_attach_args *pa; 5254 { 5255 struct pciide_channel *cp; 5256 bus_size_t cmdsize, ctlsize; 5257 pcireg_t interface; 5258 u_int8_t init_ctrl; 5259 int channel; 5260 5261 if (pciide_chipen(sc, pa) == 0) 5262 return; 5263 printf(": DMA"); 5264 /* 5265 * XXXSCW: 5266 * There seem to be a couple of buggy revisions/implementations 5267 * of the OPTi pciide chipset. This kludge seems to fix one of 5268 * the reported problems (NetBSD PR/11644) but still fails for the 5269 * other (NetBSD PR/13151), although the latter may be due to other 5270 * issues too... 
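 * The kludge is simply to leave bus-master DMA disabled on revisions <= 0x12; see the revision check just below.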
5271 */ 5272 if (PCI_REVISION(pa->pa_class) <= 0x12) { 5273 printf(" (disabled)"); 5274 sc->sc_dma_ok = 0; 5275 sc->sc_wdcdev.cap = 0; 5276 } else { 5277 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32; 5278 pciide_mapreg_dma(sc, pa); 5279 } 5280 5281 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE; 5282 sc->sc_wdcdev.PIO_cap = 4; 5283 if (sc->sc_dma_ok) { 5284 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5285 sc->sc_wdcdev.irqack = pciide_irqack; 5286 sc->sc_wdcdev.DMA_cap = 2; 5287 } 5288 sc->sc_wdcdev.set_modes = opti_setup_channel; 5289 5290 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5291 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5292 5293 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 5294 OPTI_REG_INIT_CONTROL); 5295 5296 interface = PCI_INTERFACE(pa->pa_class); 5297 5298 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5299 5300 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5301 cp = &sc->pciide_channels[channel]; 5302 if (pciide_chansetup(sc, channel, interface) == 0) 5303 continue; 5304 if (channel == 1 && 5305 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 5306 printf("%s: %s ignored (disabled)\n", 5307 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5308 continue; 5309 } 5310 pciide_map_compat_intr(pa, cp, channel, interface); 5311 if (cp->hw_ok == 0) 5312 continue; 5313 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5314 pciide_pci_intr); 5315 if (cp->hw_ok == 0) { 5316 pciide_unmap_compat_intr(pa, cp, channel, interface); 5317 continue; 5318 } 5319 opti_setup_channel(&cp->wdc_channel); 5320 } 5321 } 5322 5323 void 5324 opti_setup_channel(chp) 5325 struct channel_softc *chp; 5326 { 5327 struct ata_drive_datas *drvp; 5328 struct pciide_channel *cp = (struct pciide_channel*)chp; 5329 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5330 int drive,spd; 5331 int mode[2]; 5332 u_int8_t rv, mr; 5333 5334 /* 5335 * The `Delay' and `Address Setup Time' fields of the 5336 * Miscellaneous Register are always zero initially. 5337 */ 5338 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 5339 mr &= ~(OPTI_MISC_DELAY_MASK | 5340 OPTI_MISC_ADDR_SETUP_MASK | 5341 OPTI_MISC_INDEX_MASK); 5342 5343 /* Prime the control register before setting timing values */ 5344 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 5345 5346 /* Determine the clockrate of the PCIbus the chip is attached to */ 5347 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 5348 spd &= OPTI_STRAP_PCI_SPEED_MASK; 5349 5350 /* setup DMA if needed */ 5351 pciide_channel_dma_setup(cp); 5352 5353 for (drive = 0; drive < 2; drive++) { 5354 drvp = &chp->ch_drive[drive]; 5355 /* If no drive, skip */ 5356 if ((drvp->drive_flags & DRIVE) == 0) { 5357 mode[drive] = -1; 5358 continue; 5359 } 5360 5361 if ((drvp->drive_flags & DRIVE_DMA)) { 5362 /* 5363 * Timings will be used for both PIO and DMA, 5364 * so adjust DMA mode if needed 5365 */ 5366 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5367 drvp->PIO_mode = drvp->DMA_mode + 2; 5368 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5369 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 5370 drvp->PIO_mode - 2 : 0; 5371 if (drvp->DMA_mode == 0) 5372 drvp->PIO_mode = 0; 5373 5374 mode[drive] = drvp->DMA_mode + 5; 5375 } else 5376 mode[drive] = drvp->PIO_mode; 5377 5378 if (drive && mode[0] >= 0 && 5379 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 5380 /* 5381 * Can't have two drives using different values 5382 * for `Address Setup Time'. 

void
opti_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive, spd;
	int mode[2];
	u_int8_t rv, mr;

	/*
	 * The `Delay' and `Address Setup Time' fields of the
	 * Miscellaneous Register are always zero initially.
	 */
	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
	mr &= ~(OPTI_MISC_DELAY_MASK |
	    OPTI_MISC_ADDR_SETUP_MASK |
	    OPTI_MISC_INDEX_MASK);

	/* Prime the control register before setting timing values */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);

	/* Determine the clockrate of the PCIbus the chip is attached to */
	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
	spd &= OPTI_STRAP_PCI_SPEED_MASK;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0) {
			mode[drive] = -1;
			continue;
		}

		if ((drvp->drive_flags & DRIVE_DMA)) {
			/*
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;

			mode[drive] = drvp->DMA_mode + 5;
		} else
			mode[drive] = drvp->PIO_mode;

		if (drive && mode[0] >= 0 &&
		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
			/*
			 * Can't have two drives using different values
			 * for `Address Setup Time'.
			 * Slow down the faster drive to compensate.
			 */
			int d = (opti_tim_as[spd][mode[0]] >
			    opti_tim_as[spd][mode[1]]) ? 0 : 1;

			mode[d] = mode[1-d];
			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
			chp->ch_drive[d].DMA_mode = 0;
			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		int m;
		if ((m = mode[drive]) < 0)
			continue;

		/* Set the Address Setup Time and select appropriate index */
		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
		rv |= OPTI_MISC_INDEX(drive);
		opti_write_config(chp, OPTI_REG_MISC, mr | rv);

		/* Set the pulse width and recovery timing parameters */
		rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);

		/* Set the Enhanced Mode register appropriately */
		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
	}

	/* Finally, enable the timings */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);

	pciide_print_modes(cp);
}
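
/*
 * Illustrative sketch (not compiled): how opti_setup_channel() above
 * picks an index into the opti_tim_* tables.  Entries 0-4 are PIO 0-4
 * and entries 5-7 are multiword DMA 0-2 (DMA_mode + 5); because one set
 * of timings serves both transfer types, the PIO and DMA modes are first
 * pulled to within two steps of each other.
 * `opti_timing_index' is a hypothetical name, not part of this driver.
 */
#if 0
static int
opti_timing_index(struct ata_drive_datas *drvp)
{
	if ((drvp->drive_flags & DRIVE_DMA) == 0)
		return drvp->PIO_mode;		/* PIO only: index 0..4 */

	if (drvp->PIO_mode > drvp->DMA_mode + 2)
		drvp->PIO_mode = drvp->DMA_mode + 2;
	if (drvp->DMA_mode + 2 > drvp->PIO_mode)
		drvp->DMA_mode = (drvp->PIO_mode > 2) ?
		    drvp->PIO_mode - 2 : 0;
	if (drvp->DMA_mode == 0)
		drvp->PIO_mode = 0;

	return drvp->DMA_mode + 5;		/* DMA: index 5..7 */
}
#endif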

void
serverworks_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcitag_t pcib_tag;
	int channel;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf(": DMA");
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_RCC_OSB4_IDE:
		sc->sc_wdcdev.UDMA_cap = 2;
		break;
	case PCI_PRODUCT_RCC_CSB5_IDE:
		if (PCI_REVISION(pa->pa_class) < 0x92)
			sc->sc_wdcdev.UDMA_cap = 4;
		else
			sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_RCC_CSB6_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_RCC_CSB6_IDE2:
		sc->sc_wdcdev.UDMA_cap = 4;
		break;
	}

	sc->sc_wdcdev.set_modes = serverworks_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels =
	    (sc->sc_pp->ide_product == PCI_PRODUCT_RCC_CSB6_IDE2 ? 1 : 2);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    serverworks_pci_intr);
		if (cp->hw_ok == 0)
			return;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			return;
		serverworks_setup_channel(&cp->wdc_channel);
	}

	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
	    (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
}

void
serverworks_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	int drive, unit;
	u_int32_t pio_time, dma_time, pio_mode, udma_mode;
	u_int32_t idedma_ctl;
	static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
	static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
	dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
	pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
	udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);

	pio_time &= ~(0xffff << (16 * channel));
	dma_time &= ~(0xffff << (16 * channel));
	pio_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(3 << (2 * channel));

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		unit = drive + 2 * channel;
		/* add timing values, setup DMA if needed */
		pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
		pio_mode |= drvp->PIO_mode << (4 * unit + 16);
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA, check for 80-pin cable */
			if (drvp->UDMA_mode > 2 &&
			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_SUBSYS_ID_REG)) &
			    (1 << (14 + channel))) == 0) {
				WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire "
				    "cable not detected\n", drvp->drive_name,
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive), DEBUG_PROBE);
				drvp->UDMA_mode = 2;
			}
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
			udma_mode |= 1 << unit;
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
		    (drvp->drive_flags & DRIVE_DMA)) {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
		}
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE)
		pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(channel), idedma_ctl);
	}
	pciide_print_modes(cp);
}
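
/*
 * Illustrative layout note for serverworks_setup_channel() above: with
 * unit = drive + 2 * channel, the timing registers at 0x40/0x44 hold one
 * byte per drive with each channel's pair of bytes swapped (hence the
 * `unit ^ 1' shift), while the mode registers at 0x48/0x54 hold one
 * nibble per drive starting at bit 16.  Worked example: channel 1,
 * drive 0 is unit 2, so its PIO timing byte lands in bits 31:24 of 0x40,
 * its PIO/UDMA mode nibble in bits 27:24 of 0x48/0x54, and bit 2 of 0x54
 * acts as that drive's UDMA enable.
 */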

int
serverworks_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int rv = 0;
	int dmastat, i, crv;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(i));
		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
		    IDEDMA_CTL_INTR)
			continue;
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		crv = wdcintr(wdc_cp);
		if (crv == 0) {
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL(i), dmastat);
		} else
			rv = 1;
	}
	return rv;
}

#define ACARD_IS_850(sc) \
	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)

void
acard_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int i;
	pcireg_t interface;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	/*
	 * When the chip is in native mode it identifies itself as
	 * 'misc mass storage'. Fake the interface in this case.
	 */
	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCI_INTERFACE(pa->pa_class);
	} else {
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	printf(": DMA");
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;

	sc->sc_wdcdev.set_modes = acard_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 2;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		if (pciide_chansetup(sc, i, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(i)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, pciide_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
			    &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			return;
		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
		wdcattach(&cp->wdc_channel);
		acard_setup_channel(&cp->wdc_channel);
	}
	if (!ACARD_IS_850(sc)) {
		u_int32_t reg;
		reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
		reg &= ~ATP860_CTRL_INT;
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
	}
}

void
acard_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	int drive;
	u_int32_t idetime, udma_mode;
	u_int32_t idedma_ctl;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	if (ACARD_IS_850(sc)) {
		idetime = 0;
		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
		udma_mode &= ~ATP850_UDMA_MASK(channel);
	} else {
		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
		idetime &= ~ATP860_SETTIME_MASK(channel);
		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
		udma_mode &= ~ATP860_UDMA_MASK(channel);
	}

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_udma[drvp->UDMA_mode],
				    acard_rec_udma[drvp->UDMA_mode]);
				udma_mode |= ATP850_UDMA_MODE(channel, drive,
				    acard_udma_conf[drvp->UDMA_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_udma[drvp->UDMA_mode],
				    acard_rec_udma[drvp->UDMA_mode]);
				udma_mode |= ATP860_UDMA_MODE(channel, drive,
				    acard_udma_conf[drvp->UDMA_mode]);
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
		    (drvp->drive_flags & DRIVE_DMA)) {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_dma[drvp->DMA_mode],
				    acard_rec_dma[drvp->DMA_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_dma[drvp->DMA_mode],
				    acard_rec_dma[drvp->DMA_mode]);
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_pio[drvp->PIO_mode],
				    acard_rec_pio[drvp->PIO_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_pio[drvp->PIO_mode],
				    acard_rec_pio[drvp->PIO_mode]);
			}
			pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
			    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
			    | ATP8x0_CTRL_EN(channel));
		}
	}

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(channel), idedma_ctl);
	}
	pciide_print_modes(cp);

	if (ACARD_IS_850(sc)) {
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    ATP850_IDETIME(channel), idetime);
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
	} else {
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
	}
}

int
acard_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int rv = 0;
	int dmastat, i, crv;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(i));
		if ((dmastat & IDEDMA_CTL_INTR) == 0)
			continue;
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
			(void)wdcintr(wdc_cp);
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL(i), dmastat);
			continue;
		}
		crv = wdcintr(wdc_cp);
		if (crv == 0)
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
		else if (crv == 1)
			rv = 1;
		else if (rv == 0)
			rv = crv;
	}
	return rv;
}
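
/*
 * Editorial note on the two register layouts handled above: the ATP850
 * keeps a separate IDETIME register per channel, so acard_setup_channel()
 * rebuilds `idetime' from zero and writes ATP850_IDETIME(channel), while
 * the ATP860 family shares one ATP860_IDETIME register and only the bits
 * selected by ATP860_SETTIME_MASK(channel) are replaced, leaving the
 * other channel's timings untouched.  Similarly, acard_pci_intr() only
 * claims a channel when IDEDMA_CTL_INTR is latched; if no command is
 * pending (WDCF_IRQ_WAIT clear) it writes the status back to clear the
 * latched bit instead of reporting a bogus interrupt.
 */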

void
nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	bus_size_t cmdsize, ctlsize;
	u_int32_t conf;

	conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF);
	WDCDEBUG_PRINT(("%s: conf register 0x%x\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE);

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf(": DMA");
	pciide_mapreg_dma(sc, pa);

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_NVIDIA_NFORCE2_IDE:
		sc->sc_wdcdev.UDMA_cap = 6;
		break;
	default:
		sc->sc_wdcdev.UDMA_cap = 0;
	}
	sc->sc_wdcdev.set_modes = nforce_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];

		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		if ((conf & NFORCE_CHAN_EN(channel)) == 0) {
			printf("%s: %s ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}

		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    nforce_pci_intr);
		if (cp->hw_ok == 0) {
			pciide_unmap_compat_intr(pa, cp, channel, interface);
			continue;
		}

		if (pciide_chan_candisable(cp)) {
			conf &= ~NFORCE_CHAN_EN(channel);
			pciide_unmap_compat_intr(pa, cp, channel, interface);
			continue;
		}

		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}
	WDCDEBUG_PRINT(("%s: new conf register 0x%x\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf);
}

void
nforce_setup_channel(struct channel_softc *chp)
{
	struct ata_drive_datas *drvp;
	int drive, mode;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	u_int32_t conf, piodmatim, piotim, udmatim;

	conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF);
	piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM);
	piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM);
	udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM);
	WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, "
	    "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname,
	    cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE);

	/* Setup DMA if needed */
	pciide_channel_dma_setup(cp);

	/* Clear all bits for this channel */
	idedma_ctl = 0;
	piodmatim &= ~NFORCE_PIODMATIM_MASK(channel);
	udmatim &= ~NFORCE_UDMATIM_MASK(channel);

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];

		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;

		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) != 0) {
			/* Setup UltraDMA mode */
			drvp->drive_flags &= ~DRIVE_DMA;

			/* Check cable */
			if ((conf & NFORCE_CONF_CABLE(channel, drive)) == 0 &&
			    drvp->UDMA_mode > 2) {
				WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire "
				    "cable not detected\n", drvp->drive_name,
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive), DEBUG_PROBE);
				drvp->UDMA_mode = 2;
			}

			udmatim |= NFORCE_UDMATIM_SET(channel, drive,
			    nforce_udma[drvp->UDMA_mode]) |
			    NFORCE_UDMA_EN(channel, drive) |
			    NFORCE_UDMA_ENM(channel, drive);

			mode = drvp->PIO_mode;
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 &&
		    (drvp->drive_flags & DRIVE_DMA) != 0) {
			/* Setup multiword DMA mode */
			drvp->drive_flags &= ~DRIVE_UDMA;

			/* mode = min(pio, dma + 2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		} else {
			/* PIO only */
			mode = drvp->PIO_mode;
			goto pio;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:
		/* Setup PIO mode */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		piodmatim |= NFORCE_PIODMATIM_SET(channel, drive,
		    nforce_pio[mode]);
	}

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(channel), idedma_ctl);
	}

	WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, "
	    "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname,
	    cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim);

	pciide_print_modes(cp);
}
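
/*
 * Illustrative sketch (not compiled): the PIO/DMA reconciliation used by
 * the multiword DMA path of nforce_setup_channel() above.  The PIO timing
 * entry also clocks multiword DMA, so the effective table index is
 * min(PIO_mode, DMA_mode + 2), and anything at or below mode 2 falls back
 * to the default (index 0) timings.
 * `nforce_timing_index' is a hypothetical name, not part of this driver.
 */
#if 0
static int
nforce_timing_index(int pio_mode, int dma_mode)
{
	int mode;

	mode = (pio_mode <= dma_mode + 2) ? pio_mode : dma_mode + 2;
	return (mode <= 2) ? 0 : mode;
}
#endif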
"piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 5987 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 5988 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim); 5989 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim); 5990 5991 pciide_print_modes(cp); 5992 } 5993 5994 int 5995 nforce_pci_intr(void *arg) 5996 { 5997 struct pciide_softc *sc = arg; 5998 struct pciide_channel *cp; 5999 struct channel_softc *wdc_cp; 6000 int i, rv, crv; 6001 u_int32_t dmastat; 6002 6003 rv = 0; 6004 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6005 cp = &sc->pciide_channels[i]; 6006 wdc_cp = &cp->wdc_channel; 6007 6008 /* Skip compat channel */ 6009 if (cp->compat) 6010 continue; 6011 6012 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6013 IDEDMA_CTL(i)); 6014 if ((dmastat & IDEDMA_CTL_INTR) == 0) 6015 continue; 6016 6017 crv = wdcintr(wdc_cp); 6018 if (crv == 0) 6019 printf("%s:%d: bogus intr\n", 6020 sc->sc_wdcdev.sc_dev.dv_xname, i); 6021 else 6022 rv = 1; 6023 } 6024 return rv; 6025 } 6026 6027 void 6028 artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6029 { 6030 struct pciide_channel *cp; 6031 bus_size_t cmdsize, ctlsize; 6032 pcireg_t interface; 6033 int channel; 6034 6035 if (pciide_chipen(sc, pa) == 0) 6036 return; 6037 6038 printf("%s: DMA", 6039 sc->sc_wdcdev.sc_dev.dv_xname); 6040 #ifndef PCIIDE_I31244_ENABLEDMA 6041 if (PCI_REVISION(pa->pa_class) == 0) { 6042 printf(" disabled due to rev. 0"); 6043 sc->sc_dma_ok = 0; 6044 } else 6045 #endif 6046 pciide_mapreg_dma(sc, pa); 6047 printf("\n"); 6048 6049 /* 6050 * XXX Configure LEDs to show activity. 6051 */ 6052 6053 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6054 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 6055 sc->sc_wdcdev.PIO_cap = 4; 6056 if (sc->sc_dma_ok) { 6057 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6058 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6059 sc->sc_wdcdev.irqack = pciide_irqack; 6060 sc->sc_wdcdev.DMA_cap = 2; 6061 sc->sc_wdcdev.UDMA_cap = 6; 6062 } 6063 sc->sc_wdcdev.set_modes = sata_setup_channel; 6064 6065 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6066 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 6067 6068 interface = PCI_INTERFACE(pa->pa_class); 6069 6070 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6071 cp = &sc->pciide_channels[channel]; 6072 if (pciide_chansetup(sc, channel, interface) == 0) 6073 continue; 6074 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6075 pciide_pci_intr); 6076 if (cp->hw_ok == 0) 6077 continue; 6078 pciide_map_compat_intr(pa, cp, channel, interface); 6079 sata_setup_channel(&cp->wdc_channel); 6080 } 6081 } 6082