/* $NetBSD: viaide.c,v 1.76 2011/07/10 20:01:37 jakllsch Exp $ */

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: viaide.c,v 1.76 2011/07/10 20:01:37 jakllsch Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_apollo_reg.h>

static int	via_pcib_match(const struct pci_attach_args *);
static void	via_chip_map(struct pciide_softc *,
		    const struct pci_attach_args *);
static void	via_mapchan(const struct pci_attach_args *,
		    struct pciide_channel *,
		    pcireg_t, int (*)(void *));
static void	via_mapregs_compat_native(const struct pci_attach_args *,
		    struct pciide_channel *);
static int	via_sata_chip_map_common(struct pciide_softc *,
		    const struct pci_attach_args *);
static void	via_sata_chip_map(struct pciide_softc *,
		    const struct pci_attach_args *, int);
static void	via_sata_chip_map_6(struct pciide_softc *,
		    const struct pci_attach_args *);
static void	via_sata_chip_map_7(struct pciide_softc *,
		    const struct pci_attach_args *);
static void	via_sata_chip_map_new(struct pciide_softc *,
		    const struct pci_attach_args *);
static void	via_setup_channel(struct ata_channel *);

static int	viaide_match(device_t, cfdata_t, void *);
static void	viaide_attach(device_t, device_t, void *);
static const struct pciide_product_desc *
		viaide_lookup(pcireg_t);
static bool	viaide_suspend(device_t, const pmf_qual_t *);
static bool	viaide_resume(device_t, const pmf_qual_t *);

CFATTACH_DECL_NEW(viaide, sizeof(struct pciide_softc),
    viaide_match, viaide_attach, pciide_detach, NULL);

static const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,
	  0,
	  "AMD AMD756 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC766_IDE,
	  0,
	  "AMD AMD766 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC768_IDE,
	  0,
	  "AMD AMD768 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC8111_IDE,
	  0,
	  "AMD AMD8111 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_AMD_CS5536_IDE,
	  0,
	  "AMD CS5536 IDE Controller",
	  via_chip_map
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

static const struct pciide_product_desc pciide_nvidia_products[] = {
	{ PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
	  0,
	  "NVIDIA nForce IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
	  0,
	  "NVIDIA nForce2 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE2_400_ATA133,
	  0,
	  "NVIDIA nForce2 Ultra 400 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE2_400_SATA,
	  0,
	  "NVIDIA nForce2 Ultra 400 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE3_ATA133,
	  0,
	  "NVIDIA nForce3 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_ATA133,
	  0,
	  "NVIDIA nForce3 250 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA,
	  0,
	  "NVIDIA nForce3 250 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA2,
	  0,
	  "NVIDIA nForce3 250 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE4_ATA133,
	  0,
	  "NVIDIA nForce4 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE4_SATA1,
	  0,
	  "NVIDIA nForce4 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE4_SATA2,
	  0,
	  "NVIDIA nForce4 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE430_ATA133,
	  0,
	  "NVIDIA nForce430 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE430_SATA1,
	  0,
	  "NVIDIA nForce430 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_NFORCE430_SATA2,
	  0,
	  "NVIDIA nForce430 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP04_IDE,
	  0,
	  "NVIDIA MCP04 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_MCP04_SATA,
	  0,
	  "NVIDIA MCP04 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP04_SATA2,
	  0,
	  "NVIDIA MCP04 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP55_IDE,
	  0,
	  "NVIDIA MCP55 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_MCP55_SATA,
	  0,
	  "NVIDIA MCP55 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP55_SATA2,
	  0,
	  "NVIDIA MCP55 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP61_IDE,
	  0,
	  "NVIDIA MCP61 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_MCP65_IDE,
	  0,
	  "NVIDIA MCP65 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_MCP73_IDE,
	  0,
	  "NVIDIA MCP73 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_MCP77_IDE,
	  0,
	  "NVIDIA MCP77 IDE Controller",
	  via_chip_map
	},
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA,
	  0,
	  "NVIDIA MCP61 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA2,
	  0,
	  "NVIDIA MCP61 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA3,
	  0,
	  "NVIDIA MCP61 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA,
	  0,
	  "NVIDIA MCP65 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA2,
	  0,
	  "NVIDIA MCP65 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA3,
	  0,
	  "NVIDIA MCP65 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA4,
	  0,
	  "NVIDIA MCP65 Serial ATA Controller",
	  via_sata_chip_map_6
	},
	{ PCI_PRODUCT_NVIDIA_MCP67_IDE,
	  0,
	  "NVIDIA MCP67 IDE Controller",
	  via_chip_map,
	},
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA,
	  0,
	  "NVIDIA MCP67 Serial ATA Controller",
	  via_sata_chip_map_6,
	},
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA2,
	  0,
	  "NVIDIA MCP67 Serial ATA Controller",
	  via_sata_chip_map_6,
	},
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA3,
	  0,
	  "NVIDIA MCP67 Serial ATA Controller",
	  via_sata_chip_map_6,
	},
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA4,
	  0,
	  "NVIDIA MCP67 Serial ATA Controller",
	  via_sata_chip_map_6,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

static const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
	  0,
	  NULL,
	  via_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
	  0,
	  NULL,
	  via_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_CX700_IDE,
	  0,
	  NULL,
	  via_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_CX700M2_IDE,
	  0,
	  NULL,
	  via_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_VT6421_RAID,
	  0,
	  "VIA Technologies VT6421 Serial ATA RAID Controller",
	  via_sata_chip_map_new,
	},
	{ PCI_PRODUCT_VIATECH_VT8237_SATA,
	  0,
	  "VIA Technologies VT8237 SATA Controller",
	  via_sata_chip_map_7,
	},
	{ PCI_PRODUCT_VIATECH_VT8237A_SATA,
	  0,
	  "VIA Technologies VT8237A SATA Controller",
	  via_sata_chip_map_7,
	},
	{ PCI_PRODUCT_VIATECH_VT8237A_SATA_2,
	  0,
	  "VIA Technologies VT8237A (5337) SATA Controller",
	  via_sata_chip_map_7,
	},
	{ PCI_PRODUCT_VIATECH_VT8237R_SATA,
	  0,
	  "VIA Technologies VT8237R SATA Controller",
	  via_sata_chip_map_7,
	},
	{ PCI_PRODUCT_VIATECH_VT8237S_SATA,
	  0,
	  "VIA Technologies VT8237S SATA Controller",
	  via_sata_chip_map_7,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

static const struct pciide_product_desc *
viaide_lookup(pcireg_t id)
{

	switch (PCI_VENDOR(id)) {
	case PCI_VENDOR_VIATECH:
		return (pciide_lookup_product(id, pciide_via_products));

	case PCI_VENDOR_AMD:
		return (pciide_lookup_product(id, pciide_amd_products));

	case PCI_VENDOR_NVIDIA:
		return (pciide_lookup_product(id, pciide_nvidia_products));
	}
	return (NULL);
}

static int
viaide_match(device_t parent, cfdata_t match, void *aux)
{
	const struct pci_attach_args *pa = aux;

	if (viaide_lookup(pa->pa_id) != NULL)
		return (2);
	return (0);
}

static void
viaide_attach(device_t parent, device_t self, void *aux)
{
	const struct pci_attach_args *pa = aux;
	struct pciide_softc *sc = device_private(self);
	const struct pciide_product_desc *pp;

	sc->sc_wdcdev.sc_atac.atac_dev = self;

	pp = viaide_lookup(pa->pa_id);
	if (pp == NULL)
		panic("viaide_attach");
	pciide_common_attach(sc, pa, pp);

	if (!pmf_device_register(self, viaide_suspend, viaide_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

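/*
 * Match the companion VIA ISA bridge; via_chip_map() looks it up with
 * pci_find_device() and uses its product ID and revision to work out
 * which south bridge (and hence which UDMA mode) it is dealing with.
 */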
static int
via_pcib_match(const struct pci_attach_args *pa)
{
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_ISA &&
	    PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH)
		return (1);
	return 0;
}

static bool
viaide_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct pciide_softc *sc = device_private(dv);

	sc->sc_pm_reg[0] = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc));
	/* APO_DATATIM(sc) includes APO_UDMA(sc) */
	sc->sc_pm_reg[1] = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc));
	/* These two are VIA-only, but should be ignored by other devices. */
	sc->sc_pm_reg[2] = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC(sc));
	sc->sc_pm_reg[3] = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_MISCTIM(sc));

	return true;
}

static bool
viaide_resume(device_t dv, const pmf_qual_t *qual)
{
	struct pciide_softc *sc = device_private(dv);

	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc),
	    sc->sc_pm_reg[0]);
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc),
	    sc->sc_pm_reg[1]);
	/* These two are VIA-only, but should be ignored by other devices. */
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_CTLMISC(sc),
	    sc->sc_pm_reg[2]);
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_MISCTIM(sc),
	    sc->sc_pm_reg[3]);

	return true;
}

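/*
 * Common PATA chip mapping for the VIA, AMD and NVIDIA controllers
 * handled by this driver.  For VIA chips the supported UDMA mode is
 * derived from the companion ISA bridge's product ID and revision;
 * for AMD and NVIDIA it follows directly from the IDE function's
 * product ID.
 */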
static void
via_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t vendor = PCI_VENDOR(pa->pa_id);
	int channel;
	u_int32_t ideconf;
	pcireg_t pcib_id, pcib_class;
	struct pci_attach_args pcib_pa;

	if (pciide_chipen(sc, pa) == 0)
		return;

	switch (vendor) {
	case PCI_VENDOR_VIATECH:
		/*
		 * get a PCI tag for the ISA bridge.
		 */
		if (pci_find_device(&pcib_pa, via_pcib_match) == 0)
			goto unknown;
		pcib_id = pcib_pa.pa_id;
		pcib_class = pcib_pa.pa_class;
		aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "VIA Technologies ");
		switch (PCI_PRODUCT(pcib_id)) {
		case PCI_PRODUCT_VIATECH_VT82C586_ISA:
			aprint_normal("VT82C586 (Apollo VP) ");
			if (PCI_REVISION(pcib_class) >= 0x02) {
				aprint_normal("ATA33 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
			} else {
				aprint_normal("controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
			}
			break;
		case PCI_PRODUCT_VIATECH_VT82C596A:
			aprint_normal("VT82C596A (Apollo Pro) ");
			if (PCI_REVISION(pcib_class) >= 0x12) {
				aprint_normal("ATA66 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			} else {
				aprint_normal("ATA33 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
			}
			break;
		case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
			aprint_normal("VT82C686A (Apollo KX133) ");
			if (PCI_REVISION(pcib_class) >= 0x40) {
				aprint_normal("ATA100 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			} else {
				aprint_normal("ATA66 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			}
			break;
		case PCI_PRODUCT_VIATECH_VT8231:
			aprint_normal("VT8231 ATA100 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			break;
		case PCI_PRODUCT_VIATECH_VT8233:
			aprint_normal("VT8233 ATA100 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			break;
		case PCI_PRODUCT_VIATECH_VT8233A:
			aprint_normal("VT8233A ATA133 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		case PCI_PRODUCT_VIATECH_VT8235:
			aprint_normal("VT8235 ATA133 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		case PCI_PRODUCT_VIATECH_VT8237:
			aprint_normal("VT8237 ATA133 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		case PCI_PRODUCT_VIATECH_VT8237A_ISA:
			aprint_normal("VT8237A ATA133 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		case PCI_PRODUCT_VIATECH_CX700:
			aprint_normal("CX700 ATA133 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		case PCI_PRODUCT_VIATECH_VT8251:
			aprint_normal("VT8251 ATA133 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		default:
		unknown:
			aprint_normal("unknown VIA ATA controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
		}
		sc->sc_apo_regbase = APO_VIA_REGBASE;
		break;
	case PCI_VENDOR_AMD:
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_AMD_PBC8111_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		case PCI_PRODUCT_AMD_CS5536_IDE:
		case PCI_PRODUCT_AMD_PBC766_IDE:
		case PCI_PRODUCT_AMD_PBC768_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			break;
		default:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
		}
		sc->sc_apo_regbase = APO_AMD_REGBASE;
		break;
	case PCI_VENDOR_NVIDIA:
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			break;
		case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE2_400_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE3_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE3_250_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE4_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE430_ATA133:
		case PCI_PRODUCT_NVIDIA_MCP04_IDE:
		case PCI_PRODUCT_NVIDIA_MCP55_IDE:
		case PCI_PRODUCT_NVIDIA_MCP61_IDE:
		case PCI_PRODUCT_NVIDIA_MCP65_IDE:
		case PCI_PRODUCT_NVIDIA_MCP67_IDE:
		case PCI_PRODUCT_NVIDIA_MCP73_IDE:
		case PCI_PRODUCT_NVIDIA_MCP77_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		}
		sc->sc_apo_regbase = APO_NVIDIA_REGBASE;
		break;
	default:
		panic("via_chip_map: unknown vendor");
	}

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (sc->sc_wdcdev.sc_atac.atac_udma_cap > 0)
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = via_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

	wdc_allocate_regs(&sc->sc_wdcdev);

	ATADEBUG_PRINT(("via_chip_map: old APO_IDECONF=0x%x, "
	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA(sc))),
	    DEBUG_PROBE);

	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc));
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		via_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}

static void
via_mapchan(const struct pci_attach_args *pa, struct pciide_channel *cp,
    pcireg_t interface, int (*pci_intr)(void *))
{
	struct ata_channel *wdc_cp;
	struct pciide_softc *sc;
	prop_bool_t compat_nat_enable;

	wdc_cp = &cp->ata_channel;
	sc = CHAN_TO_PCIIDE(&cp->ata_channel);
	compat_nat_enable = prop_dictionary_get(
	    device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
	    "use-compat-native-irq");

	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->ch_channel)) {
		/* native mode with irq 14/15 requested? */
		if (compat_nat_enable != NULL &&
		    prop_bool_true(compat_nat_enable))
			via_mapregs_compat_native(pa, cp);
		else
			pciide_mapregs_native(pa, cp, pci_intr);
	} else {
		pciide_mapregs_compat(pa, cp, wdc_cp->ch_channel);
		if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0)
			pciide_map_compat_intr(pa, cp, wdc_cp->ch_channel);
	}
	wdcattach(wdc_cp);
}

/*
 * At least under certain (mis)configurations (e.g. on the "Pegasos" board)
 * the VT8231-IDE's native mode only works with irq 14/15, and cannot be
 * programmed to use a single native PCI irq alone. So we install an interrupt
 * handler for each channel, as in compatibility mode.
 */
static void
via_mapregs_compat_native(const struct pci_attach_args *pa,
    struct pciide_channel *cp)
{
	struct ata_channel *wdc_cp;
	struct pciide_softc *sc;

	wdc_cp = &cp->ata_channel;
	sc = CHAN_TO_PCIIDE(&cp->ata_channel);

	/* XXX prevent pciide_mapregs_native from installing a handler */
	if (sc->sc_pci_ih == NULL)
		sc->sc_pci_ih = (void *)~0;
	pciide_mapregs_native(pa, cp, NULL);

	/* interrupts are fixed to 14/15, as in compatibility mode */
	cp->compat = 1;
	if ((wdc_cp->ch_flags & ATACH_DISABLED) == 0) {
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
		cp->ih = pciide_machdep_compat_intr_establish(
		    sc->sc_wdcdev.sc_atac.atac_dev, pa, wdc_cp->ch_channel,
		    pciide_compat_intr, cp);
		if (cp->ih == NULL) {
#endif
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "no compatibility interrupt for "
			    "use by %s channel\n", cp->name);
			wdc_cp->ch_flags |= ATACH_DISABLED;
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
		}
		sc->sc_pci_ih = cp->ih;	/* XXX */
#endif
	}
}

static void
via_setup_channel(struct ata_channel *chp)
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive, s;
	struct ata_drive_datas *drvp;
	struct atac_softc *atac = chp->ch_atac;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
#ifndef PCIIDE_AMD756_ENABLEDMA
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
#endif

	idedma_ctl = 0;
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc));
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA(sc));
	datatim_reg &= ~APO_DATATIM_MASK(chp->ch_channel);
	udmatim_reg &= ~APO_UDMA_MASK(chp->ch_channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

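	/*
	 * Per-drive timing setup: use Ultra/DMA timings when both the
	 * controller and the drive support it, otherwise fall back to
	 * multiword DMA (with the AMD756 revision workaround below),
	 * and finally to plain PIO.  The timings are accumulated in
	 * datatim_reg/udmatim_reg and written back after the loop.
	 */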
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
			mode = drvp->PIO_mode;
			goto pio;
		}
		if ((atac->atac_cap & ATAC_CAP_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~DRIVE_DMA;
			splx(s);
			udmatim_reg |= APO_UDMA_EN(chp->ch_channel, drive) |
			    APO_UDMA_EN_MTH(chp->ch_channel, drive);
			switch (PCI_VENDOR(sc->sc_pci_id)) {
			case PCI_VENDOR_VIATECH:
				if (sc->sc_wdcdev.sc_atac.atac_udma_cap == 6) {
					/* 8233a */
					udmatim_reg |= APO_UDMA_TIME(
					    chp->ch_channel,
					    drive,
					    via_udma133_tim[drvp->UDMA_mode]);
				} else if (sc->sc_wdcdev.sc_atac.atac_udma_cap == 5) {
					/* 686b */
					udmatim_reg |= APO_UDMA_TIME(
					    chp->ch_channel,
					    drive,
					    via_udma100_tim[drvp->UDMA_mode]);
				} else if (sc->sc_wdcdev.sc_atac.atac_udma_cap == 4) {
					/* 596b or 686a */
					udmatim_reg |= APO_UDMA_CLK66(
					    chp->ch_channel);
					udmatim_reg |= APO_UDMA_TIME(
					    chp->ch_channel,
					    drive,
					    via_udma66_tim[drvp->UDMA_mode]);
				} else {
					/* 596a or 586b */
					udmatim_reg |= APO_UDMA_TIME(
					    chp->ch_channel,
					    drive,
					    via_udma33_tim[drvp->UDMA_mode]);
				}
				break;
			case PCI_VENDOR_AMD:
			case PCI_VENDOR_NVIDIA:
				udmatim_reg |= APO_UDMA_TIME(chp->ch_channel,
				    drive, amd7x6_udma_tim[drvp->UDMA_mode]);
				break;
			}
			/* can use PIO timings, MW DMA unused */
			mode = drvp->PIO_mode;
		} else {
			/* use Multiword DMA, but only if revision is OK */
			s = splbio();
			drvp->drive_flags &= ~DRIVE_UDMA;
			splx(s);
#ifndef PCIIDE_AMD756_ENABLEDMA
			/*
			 * The workaround doesn't seem to be necessary
			 * with all drives, so it can be disabled by
			 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
			 * triggered.
			 */
			if (PCI_VENDOR(sc->sc_pci_id) == PCI_VENDOR_AMD &&
			    sc->sc_pp->ide_product ==
				PCI_PRODUCT_AMD_PBC756_IDE &&
			    AMD756_CHIPREV_DISABLEDMA(rev)) {
				aprint_normal(
				    "%s:%d:%d: multi-word DMA disabled due "
				    "to chip revision\n",
				    device_xname(
				      sc->sc_wdcdev.sc_atac.atac_dev),
				    chp->ch_channel, drive);
				mode = drvp->PIO_mode;
				s = splbio();
				drvp->drive_flags &= ~DRIVE_DMA;
				splx(s);
				goto pio;
			}
#endif
			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* setup PIO mode */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		datatim_reg |=
		    APO_DATATIM_PULSE(chp->ch_channel, drive,
			apollo_pio_set[mode]) |
		    APO_DATATIM_RECOV(chp->ch_channel, drive,
			apollo_pio_rec[mode]);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc), datatim_reg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA(sc), udmatim_reg);
	ATADEBUG_PRINT(("via_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA(sc))), DEBUG_PROBE);
}

static int
via_sata_chip_map_common(struct pciide_softc *sc,
    const struct pci_attach_args *cpa)
{
	pcireg_t csr;
	int maptype, ret;
	struct pci_attach_args pac, *pa = &pac;

	pac = *cpa;

	if (pciide_chipen(sc, pa) == 0)
		return 0;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA | ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

	wdc_allocate_regs(&sc->sc_wdcdev);
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCI_MAPREG_START + 0x14);
	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		ret = pci_mapreg_map(pa, PCI_MAPREG_START + 0x14,
		    PCI_MAPREG_TYPE_IO, 0, &sc->sc_ba5_st, &sc->sc_ba5_sh,
		    NULL, &sc->sc_ba5_ss);
		break;
	case PCI_MAPREG_MEM_TYPE_32BIT:
		/*
		 * Enable memory-space access if it isn't already there.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		if ((csr & PCI_COMMAND_MEM_ENABLE) == 0 &&
		    (pa->pa_flags & PCI_FLAGS_MEM_OKAY) != 0) {

			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    PCI_COMMAND_STATUS_REG,
			    csr | PCI_COMMAND_MEM_ENABLE);
		}

		ret = pci_mapreg_map(pa, PCI_MAPREG_START + 0x14,
		    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &sc->sc_ba5_st, &sc->sc_ba5_sh,
		    NULL, &sc->sc_ba5_ss);
		break;
	default:
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map sata regs, unsupported maptype (0x%x)\n",
		    maptype);
		return 0;
	}
	if (ret != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map sata regs\n");
		return 0;
	}
	return 1;
}

static void
via_sata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa,
    int satareg_shift)
{
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	pcireg_t interface;
	int channel;

	interface = PCI_INTERFACE(pa->pa_class);

	if (via_sata_chip_map_common(sc, pa) == 0)
		return;

	if (interface == 0) {
		ATADEBUG_PRINT(("via_sata_chip_map interface == 0\n"),
		    DEBUG_PROBE);
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		wdc_cp = &cp->ata_channel;
		wdr = CHAN_TO_WDC_REGS(wdc_cp);
		wdr->sata_iot = sc->sc_ba5_st;
		wdr->sata_baseioh = sc->sc_ba5_sh;
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << satareg_shift) + 0x0, 4,
		    &wdr->sata_status) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_status regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << satareg_shift) + 0x4, 4,
		    &wdr->sata_error) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_error regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << satareg_shift) + 0x8, 4,
		    &wdr->sata_control) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_control regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		sc->sc_wdcdev.sc_atac.atac_probe = wdc_sataprobe;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}

static void
via_sata_chip_map_6(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	via_sata_chip_map(sc, pa, 6);
}

static void
via_sata_chip_map_7(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	via_sata_chip_map(sc, pa, 7);
}

/*
 * Map the VT6421 bus-master DMA registers: the DMA I/O BAR is mapped
 * once and then split into one register window per channel, spaced
 * IDEDMA_SCH_OFFSET bytes apart, for all four channel slots.
 */
static void
via_vt6421_mapreg_dma(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *pc;
	int chan, reg;
	bus_size_t size;

	sc->sc_dma_ok = (pci_mapreg_map(pa, PCIIDE_REG_BUS_MASTER_DMA,
	    PCI_MAPREG_TYPE_IO, 0, &sc->sc_dma_iot, &sc->sc_dma_ioh,
	    NULL, &sc->sc_dma_ios) == 0);
	sc->sc_dmat = pa->pa_dmat;
	if (sc->sc_dma_ok == 0) {
		aprint_verbose(", but unused (couldn't map registers)");
	} else {
		sc->sc_wdcdev.dma_arg = sc;
		sc->sc_wdcdev.dma_init = pciide_dma_init;
		sc->sc_wdcdev.dma_start = pciide_dma_start;
		sc->sc_wdcdev.dma_finish = pciide_dma_finish;
	}

	if (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
	    PCIIDE_OPTIONS_NODMA) {
		aprint_verbose(
		    ", but unused (forced off by config file)");
		sc->sc_dma_ok = 0;
	}

	if (sc->sc_dma_ok == 0)
		return;

	for (chan = 0; chan < 4; chan++) {
		pc = &sc->pciide_channels[chan];
		for (reg = 0; reg < IDEDMA_NREGS; reg++) {
			size = 4;
			if (size > (IDEDMA_SCH_OFFSET - reg))
				size = IDEDMA_SCH_OFFSET - reg;
			if (bus_space_subregion(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_SCH_OFFSET * chan + reg, size,
			    &pc->dma_iohs[reg]) != 0) {
				sc->sc_dma_ok = 0;
				aprint_verbose(", but can't subregion offset "
				    "%d size %lu",
				    reg, (u_long)size);
				return;
			}
		}
	}
}

static int
via_vt6421_chansetup(struct pciide_softc *sc, int channel)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];

	sc->wdc_chanarray[channel] = &cp->ata_channel;

	cp->ata_channel.ch_channel = channel;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	cp->ata_channel.ch_queue =
	    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
	cp->ata_channel.ch_ndrive = 2;
	if (cp->ata_channel.ch_queue == NULL) {
		aprint_error("%s channel %d: "
		    "can't allocate memory for command queue",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel);
		return 0;
	}
	return 1;
}

static void
via_sata_chip_map_new(struct pciide_softc *sc,
    const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	int channel;
	pci_intr_handle_t intrhandle;
	const char *intrstr;
	int i;

	if (pciide_chipen(sc, pa) == 0)
		return;

	sc->sc_apo_regbase = APO_VIA_VT6421_REGBASE;

	if (pci_mapreg_map(pa, PCI_BAR(5), PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_ba5_st, &sc->sc_ba5_sh, NULL, &sc->sc_ba5_ss) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map SATA regs\n");
	}

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	via_vt6421_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 3;

	wdc_allocate_regs(&sc->sc_wdcdev);

	if (pci_intr_map(pa, &intrhandle) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map native-PCI interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, intrhandle);
	sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
	    intrhandle, IPL_BIO, pciide_pci_intr, sc);
	if (sc->sc_pci_ih == NULL) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't establish native-PCI interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "using %s for native-PCI interrupt\n",
	    intrstr ? intrstr : "unknown interrupt");

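	/*
	 * Set up each channel by hand: the SATA status/error/control
	 * registers live at a 64-byte stride in BAR5, while each
	 * channel's command-block registers sit in that channel's own
	 * I/O BAR (BAR0..BAR2).
	 */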
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (via_vt6421_chansetup(sc, channel) == 0)
			continue;
		cp->ata_channel.ch_ndrive = 2;
		wdc_cp = &cp->ata_channel;
		wdr = CHAN_TO_WDC_REGS(wdc_cp);

		wdr->sata_iot = sc->sc_ba5_st;
		wdr->sata_baseioh = sc->sc_ba5_sh;
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << 6) + 0x0, 4,
		    &wdr->sata_status) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_status regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << 6) + 0x4, 4,
		    &wdr->sata_error) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_error regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << 6) + 0x8, 4,
		    &wdr->sata_control) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_control regs\n",
			    wdc_cp->ch_channel);
			continue;
		}

		if (pci_mapreg_map(pa, PCI_BAR(wdc_cp->ch_channel),
		    PCI_MAPREG_TYPE_IO, 0, &wdr->cmd_iot, &wdr->cmd_baseioh,
		    NULL, &wdr->cmd_ios) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map %s channel regs\n", cp->name);
		}
		wdr->ctl_iot = wdr->cmd_iot;
		for (i = 0; i < WDC_NREG; i++) {
			if (bus_space_subregion(wdr->cmd_iot,
			    wdr->cmd_baseioh, i, i == 0 ? 4 : 1,
			    &wdr->cmd_iohs[i]) != 0) {
				aprint_error_dev(
				    sc->sc_wdcdev.sc_atac.atac_dev,
				    "couldn't subregion %s "
				    "channel cmd regs\n", cp->name);
				return;
			}
		}
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
		    WDC_NREG + 2, 1, &wdr->ctl_ioh) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d ctl regs\n", channel);
			return;
		}
		wdc_init_shadow_regs(wdc_cp);
		wdr->data32iot = wdr->cmd_iot;
		wdr->data32ioh = wdr->cmd_iohs[wd_data];
		wdcattach(wdc_cp);
	}
}