/*	$OpenBSD: pciide.c,v 1.333 2011/09/17 12:23:19 jsg Exp $	*/
/*	$NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou
 *      for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10

#ifdef WDCDEBUG
#ifndef WDCDEBUG_PCIIDE_MASK
#define WDCDEBUG_PCIIDE_MASK 0x00
#endif
int wdcdebug_pciide_mask = WDCDEBUG_PCIIDE_MASK;
#define WDCDEBUG_PRINT(args, level) do {		\
	if ((wdcdebug_pciide_mask & (level)) != 0)	\
		printf args;				\
} while (0)
#else
#define WDCDEBUG_PRINT(args, level)
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <machine/bus.h>
#include <machine/endian.h>

#include <dev/ata/atavar.h>
#include <dev/ata/satareg.h>
#include <dev/ic/wdcreg.h>
#include <dev/ic/wdcvar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_sii3112_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/pciide_acard_reg.h>
#include <dev/pci/pciide_natsemi_reg.h>
#include <dev/pci/pciide_nforce_reg.h>
#include <dev/pci/pciide_i31244_reg.h>
#include <dev/pci/pciide_ite_reg.h>
#include <dev/pci/pciide_ixp_reg.h>
#include <dev/pci/pciide_svwsata_reg.h>
#include <dev/pci/pciide_jmicron_reg.h>
#include <dev/pci/cy82c693var.h>

/* functions for reading/writing 8-bit PCI registers */

u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t,
	    int);
void pciide_pci_write(pci_chipset_tag_t, pcitag_t,
	    int, u_int8_t);

u_int8_t
pciide_pci_read(pci_chipset_tag_t pc, pcitag_t pa, int reg)
{
	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

void
pciide_pci_write(pci_chipset_tag_t pc, pcitag_t pa, int reg, u_int8_t val)
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}

void default_chip_map(struct pciide_softc *, struct pci_attach_args *);

void sata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sata_setup_channel(struct channel_softc *);

void piix_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piixsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piix_setup_channel(struct channel_softc *);
void piix3_4_setup_channel(struct channel_softc *);
void piix_timing_debug(struct pciide_softc *);

u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t);
u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas *);
u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t);

void amd756_chip_map(struct pciide_softc *, struct pci_attach_args *);
void amd756_setup_channel(struct channel_softc *);

void apollo_chip_map(struct
pciide_softc *, struct pci_attach_args *); 172 void apollo_setup_channel(struct channel_softc *); 173 174 void cmd_chip_map(struct pciide_softc *, struct pci_attach_args *); 175 void cmd0643_9_chip_map(struct pciide_softc *, struct pci_attach_args *); 176 void cmd0643_9_setup_channel(struct channel_softc *); 177 void cmd680_chip_map(struct pciide_softc *, struct pci_attach_args *); 178 void cmd680_setup_channel(struct channel_softc *); 179 void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int); 180 void cmd_channel_map(struct pci_attach_args *, 181 struct pciide_softc *, int); 182 int cmd_pci_intr(void *); 183 void cmd646_9_irqack(struct channel_softc *); 184 185 void sii_fixup_cacheline(struct pciide_softc *, struct pci_attach_args *); 186 void sii3112_chip_map(struct pciide_softc *, struct pci_attach_args *); 187 void sii3112_setup_channel(struct channel_softc *); 188 void sii3112_drv_probe(struct channel_softc *); 189 void sii3114_chip_map(struct pciide_softc *, struct pci_attach_args *); 190 void sii3114_mapreg_dma(struct pciide_softc *, struct pci_attach_args *); 191 int sii3114_chansetup(struct pciide_softc *, int); 192 void sii3114_mapchan(struct pciide_channel *); 193 u_int8_t sii3114_dmacmd_read(struct pciide_softc *, int); 194 void sii3114_dmacmd_write(struct pciide_softc *, int, u_int8_t); 195 u_int8_t sii3114_dmactl_read(struct pciide_softc *, int); 196 void sii3114_dmactl_write(struct pciide_softc *, int, u_int8_t); 197 void sii3114_dmatbl_write(struct pciide_softc *, int, u_int32_t); 198 199 void cy693_chip_map(struct pciide_softc *, struct pci_attach_args *); 200 void cy693_setup_channel(struct channel_softc *); 201 202 void sis_chip_map(struct pciide_softc *, struct pci_attach_args *); 203 void sis_setup_channel(struct channel_softc *); 204 void sis96x_setup_channel(struct channel_softc *); 205 int sis_hostbr_match(struct pci_attach_args *); 206 int sis_south_match(struct pci_attach_args *); 207 208 void natsemi_chip_map(struct pciide_softc *, struct pci_attach_args *); 209 void natsemi_setup_channel(struct channel_softc *); 210 int natsemi_pci_intr(void *); 211 void natsemi_irqack(struct channel_softc *); 212 void ns_scx200_chip_map(struct pciide_softc *, struct pci_attach_args *); 213 void ns_scx200_setup_channel(struct channel_softc *); 214 215 void acer_chip_map(struct pciide_softc *, struct pci_attach_args *); 216 void acer_setup_channel(struct channel_softc *); 217 int acer_pci_intr(void *); 218 int acer_dma_init(void *, int, int, void *, size_t, int); 219 220 void pdc202xx_chip_map(struct pciide_softc *, struct pci_attach_args *); 221 void pdc202xx_setup_channel(struct channel_softc *); 222 void pdc20268_setup_channel(struct channel_softc *); 223 int pdc202xx_pci_intr(void *); 224 int pdc20265_pci_intr(void *); 225 void pdc20262_dma_start(void *, int, int); 226 int pdc20262_dma_finish(void *, int, int, int); 227 228 u_int8_t pdc268_config_read(struct channel_softc *, int); 229 230 void pdcsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 231 void pdc203xx_setup_channel(struct channel_softc *); 232 int pdc203xx_pci_intr(void *); 233 void pdc203xx_irqack(struct channel_softc *); 234 void pdc203xx_dma_start(void *,int ,int); 235 int pdc203xx_dma_finish(void *, int, int, int); 236 int pdc205xx_pci_intr(void *); 237 void pdc205xx_do_reset(struct channel_softc *); 238 void pdc205xx_drv_probe(struct channel_softc *); 239 240 void opti_chip_map(struct pciide_softc *, struct pci_attach_args *); 241 void opti_setup_channel(struct 
channel_softc *); 242 243 void hpt_chip_map(struct pciide_softc *, struct pci_attach_args *); 244 void hpt_setup_channel(struct channel_softc *); 245 int hpt_pci_intr(void *); 246 247 void acard_chip_map(struct pciide_softc *, struct pci_attach_args *); 248 void acard_setup_channel(struct channel_softc *); 249 250 void serverworks_chip_map(struct pciide_softc *, struct pci_attach_args *); 251 void serverworks_setup_channel(struct channel_softc *); 252 int serverworks_pci_intr(void *); 253 254 void svwsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 255 void svwsata_mapreg_dma(struct pciide_softc *, struct pci_attach_args *); 256 void svwsata_mapchan(struct pciide_channel *); 257 u_int8_t svwsata_dmacmd_read(struct pciide_softc *, int); 258 void svwsata_dmacmd_write(struct pciide_softc *, int, u_int8_t); 259 u_int8_t svwsata_dmactl_read(struct pciide_softc *, int); 260 void svwsata_dmactl_write(struct pciide_softc *, int, u_int8_t); 261 void svwsata_dmatbl_write(struct pciide_softc *, int, u_int32_t); 262 void svwsata_drv_probe(struct channel_softc *); 263 264 void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *); 265 void nforce_setup_channel(struct channel_softc *); 266 int nforce_pci_intr(void *); 267 268 void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *); 269 270 void ite_chip_map(struct pciide_softc *, struct pci_attach_args *); 271 void ite_setup_channel(struct channel_softc *); 272 273 void ixp_chip_map(struct pciide_softc *, struct pci_attach_args *); 274 void ixp_setup_channel(struct channel_softc *); 275 276 void jmicron_chip_map(struct pciide_softc *, struct pci_attach_args *); 277 void jmicron_setup_channel(struct channel_softc *); 278 279 void phison_chip_map(struct pciide_softc *, struct pci_attach_args *); 280 void phison_setup_channel(struct channel_softc *); 281 282 void sch_chip_map(struct pciide_softc *, struct pci_attach_args *); 283 void sch_setup_channel(struct channel_softc *); 284 285 struct pciide_product_desc { 286 u_int32_t ide_product; 287 u_short ide_flags; 288 /* map and setup chip, probe drives */ 289 void (*chip_map)(struct pciide_softc *, struct pci_attach_args *); 290 }; 291 292 /* Flags for ide_flags */ 293 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 294 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 295 296 /* Default product description for devices not known from this controller */ 297 const struct pciide_product_desc default_product_desc = { 298 0, /* Generic PCI IDE controller */ 299 0, 300 default_chip_map 301 }; 302 303 const struct pciide_product_desc pciide_intel_products[] = { 304 { PCI_PRODUCT_INTEL_31244, /* Intel 31244 SATA */ 305 0, 306 artisea_chip_map 307 }, 308 { PCI_PRODUCT_INTEL_82092AA, /* Intel 82092AA IDE */ 309 0, 310 default_chip_map 311 }, 312 { PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */ 313 0, 314 piix_chip_map 315 }, 316 { PCI_PRODUCT_INTEL_82371FB_ISA, /* Intel 82371FB IDE (PIIX) */ 317 0, 318 piix_chip_map 319 }, 320 { PCI_PRODUCT_INTEL_82372FB_IDE, /* Intel 82372FB IDE (PIIX4) */ 321 0, 322 piix_chip_map 323 }, 324 { PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */ 325 0, 326 piix_chip_map 327 }, 328 { PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */ 329 0, 330 piix_chip_map 331 }, 332 { PCI_PRODUCT_INTEL_82371MX, /* Intel 82371MX IDE */ 333 0, 334 piix_chip_map 335 }, 336 { PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */ 337 0, 338 piix_chip_map 339 }, 340 { 
PCI_PRODUCT_INTEL_82451NX, /* Intel 82451NX (PIIX4) IDE */ 341 0, 342 piix_chip_map 343 }, 344 { PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */ 345 0, 346 piix_chip_map 347 }, 348 { PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */ 349 0, 350 piix_chip_map 351 }, 352 { PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */ 353 0, 354 piix_chip_map 355 }, 356 { PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */ 357 0, 358 piix_chip_map 359 }, 360 { PCI_PRODUCT_INTEL_82801CAM_IDE, /* Intel 82801CAM IDE (ICH3) */ 361 0, 362 piix_chip_map 363 }, 364 { PCI_PRODUCT_INTEL_82801CA_IDE, /* Intel 82801CA IDE (ICH3) */ 365 0, 366 piix_chip_map 367 }, 368 { PCI_PRODUCT_INTEL_82801DB_IDE, /* Intel 82801DB IDE (ICH4) */ 369 0, 370 piix_chip_map 371 }, 372 { PCI_PRODUCT_INTEL_82801DBL_IDE, /* Intel 82801DBL IDE (ICH4-L) */ 373 0, 374 piix_chip_map 375 }, 376 { PCI_PRODUCT_INTEL_82801DBM_IDE, /* Intel 82801DBM IDE (ICH4-M) */ 377 0, 378 piix_chip_map 379 }, 380 { PCI_PRODUCT_INTEL_82801EB_IDE, /* Intel 82801EB/ER (ICH5/5R) IDE */ 381 0, 382 piix_chip_map 383 }, 384 { PCI_PRODUCT_INTEL_82801EB_SATA, /* Intel 82801EB (ICH5) SATA */ 385 0, 386 piixsata_chip_map 387 }, 388 { PCI_PRODUCT_INTEL_82801ER_SATA, /* Intel 82801ER (ICH5R) SATA */ 389 0, 390 piixsata_chip_map 391 }, 392 { PCI_PRODUCT_INTEL_6300ESB_IDE, /* Intel 6300ESB IDE */ 393 0, 394 piix_chip_map 395 }, 396 { PCI_PRODUCT_INTEL_6300ESB_SATA, /* Intel 6300ESB SATA */ 397 0, 398 piixsata_chip_map 399 }, 400 { PCI_PRODUCT_INTEL_6300ESB_SATA2, /* Intel 6300ESB SATA */ 401 0, 402 piixsata_chip_map 403 }, 404 { PCI_PRODUCT_INTEL_6321ESB_IDE, /* Intel 6321ESB IDE */ 405 0, 406 piix_chip_map 407 }, 408 { PCI_PRODUCT_INTEL_82801FB_IDE, /* Intel 82801FB (ICH6) IDE */ 409 0, 410 piix_chip_map 411 }, 412 { PCI_PRODUCT_INTEL_82801FBM_SATA, /* Intel 82801FBM (ICH6M) SATA */ 413 0, 414 piixsata_chip_map 415 }, 416 { PCI_PRODUCT_INTEL_82801FB_SATA, /* Intel 82801FB (ICH6) SATA */ 417 0, 418 piixsata_chip_map 419 }, 420 { PCI_PRODUCT_INTEL_82801FR_SATA, /* Intel 82801FR (ICH6R) SATA */ 421 0, 422 piixsata_chip_map 423 }, 424 { PCI_PRODUCT_INTEL_82801GB_IDE, /* Intel 82801GB (ICH7) IDE */ 425 0, 426 piix_chip_map 427 }, 428 { PCI_PRODUCT_INTEL_82801GB_SATA, /* Intel 82801GB (ICH7) SATA */ 429 0, 430 piixsata_chip_map 431 }, 432 { PCI_PRODUCT_INTEL_82801GR_AHCI, /* Intel 82801GR (ICH7R) AHCI */ 433 0, 434 piixsata_chip_map 435 }, 436 { PCI_PRODUCT_INTEL_82801GR_RAID, /* Intel 82801GR (ICH7R) RAID */ 437 0, 438 piixsata_chip_map 439 }, 440 { PCI_PRODUCT_INTEL_82801GBM_SATA, /* Intel 82801GBM (ICH7M) SATA */ 441 0, 442 piixsata_chip_map 443 }, 444 { PCI_PRODUCT_INTEL_82801GBM_AHCI, /* Intel 82801GBM (ICH7M) AHCI */ 445 0, 446 piixsata_chip_map 447 }, 448 { PCI_PRODUCT_INTEL_82801GHM_RAID, /* Intel 82801GHM (ICH7M DH) RAID */ 449 0, 450 piixsata_chip_map 451 }, 452 { PCI_PRODUCT_INTEL_82801H_SATA_1, /* Intel 82801H (ICH8) SATA */ 453 0, 454 piixsata_chip_map 455 }, 456 { PCI_PRODUCT_INTEL_82801H_AHCI_6P, /* Intel 82801H (ICH8) AHCI */ 457 0, 458 piixsata_chip_map 459 }, 460 { PCI_PRODUCT_INTEL_82801H_RAID, /* Intel 82801H (ICH8) RAID */ 461 0, 462 piixsata_chip_map 463 }, 464 { PCI_PRODUCT_INTEL_82801H_AHCI_4P, /* Intel 82801H (ICH8) AHCI */ 465 0, 466 piixsata_chip_map 467 }, 468 { PCI_PRODUCT_INTEL_82801H_SATA_2, /* Intel 82801H (ICH8) SATA */ 469 0, 470 piixsata_chip_map 471 }, 472 { PCI_PRODUCT_INTEL_82801HBM_SATA, /* Intel 82801HBM (ICH8M) SATA */ 473 0, 474 piixsata_chip_map 475 }, 476 { 
PCI_PRODUCT_INTEL_82801HBM_AHCI, /* Intel 82801HBM (ICH8M) AHCI */ 477 0, 478 piixsata_chip_map 479 }, 480 { PCI_PRODUCT_INTEL_82801HBM_RAID, /* Intel 82801HBM (ICH8M) RAID */ 481 0, 482 piixsata_chip_map 483 }, 484 { PCI_PRODUCT_INTEL_82801HBM_IDE, /* Intel 82801HBM (ICH8M) IDE */ 485 0, 486 piix_chip_map 487 }, 488 { PCI_PRODUCT_INTEL_82801I_SATA_1, /* Intel 82801I (ICH9) SATA */ 489 0, 490 piixsata_chip_map 491 }, 492 { PCI_PRODUCT_INTEL_82801I_SATA_2, /* Intel 82801I (ICH9) SATA */ 493 0, 494 piixsata_chip_map 495 }, 496 { PCI_PRODUCT_INTEL_82801I_SATA_3, /* Intel 82801I (ICH9) SATA */ 497 0, 498 piixsata_chip_map 499 }, 500 { PCI_PRODUCT_INTEL_82801I_SATA_4, /* Intel 82801I (ICH9) SATA */ 501 0, 502 piixsata_chip_map 503 }, 504 { PCI_PRODUCT_INTEL_82801I_SATA_5, /* Intel 82801I (ICH9M) SATA */ 505 0, 506 piixsata_chip_map 507 }, 508 { PCI_PRODUCT_INTEL_82801I_SATA_6, /* Intel 82801I (ICH9M) SATA */ 509 0, 510 piixsata_chip_map 511 }, 512 { PCI_PRODUCT_INTEL_82801JD_SATA_1, /* Intel 82801JD (ICH10) SATA */ 513 0, 514 piixsata_chip_map 515 }, 516 { PCI_PRODUCT_INTEL_82801JD_SATA_2, /* Intel 82801JD (ICH10) SATA */ 517 0, 518 piixsata_chip_map 519 }, 520 { PCI_PRODUCT_INTEL_82801JI_SATA_1, /* Intel 82801JI (ICH10) SATA */ 521 0, 522 piixsata_chip_map 523 }, 524 { PCI_PRODUCT_INTEL_82801JI_SATA_2, /* Intel 82801JI (ICH10) SATA */ 525 0, 526 piixsata_chip_map 527 }, 528 { PCI_PRODUCT_INTEL_6321ESB_SATA, /* Intel 6321ESB SATA */ 529 0, 530 piixsata_chip_map 531 }, 532 { PCI_PRODUCT_INTEL_3400_SATA_1, /* Intel 3400 SATA */ 533 0, 534 piixsata_chip_map 535 }, 536 { PCI_PRODUCT_INTEL_3400_SATA_2, /* Intel 3400 SATA */ 537 0, 538 piixsata_chip_map 539 }, 540 { PCI_PRODUCT_INTEL_3400_SATA_3, /* Intel 3400 SATA */ 541 0, 542 piixsata_chip_map 543 }, 544 { PCI_PRODUCT_INTEL_3400_SATA_4, /* Intel 3400 SATA */ 545 0, 546 piixsata_chip_map 547 }, 548 { PCI_PRODUCT_INTEL_3400_SATA_5, /* Intel 3400 SATA */ 549 0, 550 piixsata_chip_map 551 }, 552 { PCI_PRODUCT_INTEL_3400_SATA_6, /* Intel 3400 SATA */ 553 0, 554 piixsata_chip_map 555 }, 556 { PCI_PRODUCT_INTEL_6SERIES_SATA_1, /* Intel 6 Series SATA */ 557 0, 558 piixsata_chip_map 559 }, 560 { PCI_PRODUCT_INTEL_6SERIES_SATA_2, /* Intel 6 Series SATA */ 561 0, 562 piixsata_chip_map 563 }, 564 { PCI_PRODUCT_INTEL_6SERIES_SATA_3, /* Intel 6 Series SATA */ 565 0, 566 piixsata_chip_map 567 }, 568 { PCI_PRODUCT_INTEL_6SERIES_SATA_4, /* Intel 6 Series SATA */ 569 0, 570 piixsata_chip_map 571 }, 572 { PCI_PRODUCT_INTEL_EP80579_SATA, /* Intel EP80579 SATA */ 573 0, 574 piixsata_chip_map 575 }, 576 { PCI_PRODUCT_INTEL_SCH_IDE, /* Intel SCH IDE */ 577 0, 578 sch_chip_map 579 } 580 }; 581 582 const struct pciide_product_desc pciide_amd_products[] = { 583 { PCI_PRODUCT_AMD_PBC756_IDE, /* AMD 756 */ 584 0, 585 amd756_chip_map 586 }, 587 { PCI_PRODUCT_AMD_766_IDE, /* AMD 766 */ 588 0, 589 amd756_chip_map 590 }, 591 { PCI_PRODUCT_AMD_PBC768_IDE, 592 0, 593 amd756_chip_map 594 }, 595 { PCI_PRODUCT_AMD_8111_IDE, 596 0, 597 amd756_chip_map 598 }, 599 { PCI_PRODUCT_AMD_CS5536_IDE, 600 0, 601 amd756_chip_map 602 }, 603 { PCI_PRODUCT_AMD_HUDSON2_IDE, 604 0, 605 ixp_chip_map 606 } 607 }; 608 609 #ifdef notyet 610 const struct pciide_product_desc pciide_opti_products[] = { 611 612 { PCI_PRODUCT_OPTI_82C621, 613 0, 614 opti_chip_map 615 }, 616 { PCI_PRODUCT_OPTI_82C568, 617 0, 618 opti_chip_map 619 }, 620 { PCI_PRODUCT_OPTI_82D568, 621 0, 622 opti_chip_map 623 } 624 }; 625 #endif 626 627 const struct pciide_product_desc pciide_cmd_products[] = { 628 { PCI_PRODUCT_CMDTECH_640, /* 
CMD Technology PCI0640 */ 629 0, 630 cmd_chip_map 631 }, 632 { PCI_PRODUCT_CMDTECH_643, /* CMD Technology PCI0643 */ 633 0, 634 cmd0643_9_chip_map 635 }, 636 { PCI_PRODUCT_CMDTECH_646, /* CMD Technology PCI0646 */ 637 0, 638 cmd0643_9_chip_map 639 }, 640 { PCI_PRODUCT_CMDTECH_648, /* CMD Technology PCI0648 */ 641 0, 642 cmd0643_9_chip_map 643 }, 644 { PCI_PRODUCT_CMDTECH_649, /* CMD Technology PCI0649 */ 645 0, 646 cmd0643_9_chip_map 647 }, 648 { PCI_PRODUCT_CMDTECH_680, /* CMD Technology PCI0680 */ 649 IDE_PCI_CLASS_OVERRIDE, 650 cmd680_chip_map 651 }, 652 { PCI_PRODUCT_CMDTECH_3112, /* SiI3112 SATA */ 653 0, 654 sii3112_chip_map 655 }, 656 { PCI_PRODUCT_CMDTECH_3512, /* SiI3512 SATA */ 657 0, 658 sii3112_chip_map 659 }, 660 { PCI_PRODUCT_CMDTECH_AAR_1210SA, /* Adaptec AAR-1210SA */ 661 0, 662 sii3112_chip_map 663 }, 664 { PCI_PRODUCT_CMDTECH_3114, /* SiI3114 SATA */ 665 0, 666 sii3114_chip_map 667 } 668 }; 669 670 const struct pciide_product_desc pciide_via_products[] = { 671 { PCI_PRODUCT_VIATECH_VT82C416, /* VIA VT82C416 IDE */ 672 0, 673 apollo_chip_map 674 }, 675 { PCI_PRODUCT_VIATECH_VT82C571, /* VIA VT82C571 IDE */ 676 0, 677 apollo_chip_map 678 }, 679 { PCI_PRODUCT_VIATECH_VT6410, /* VIA VT6410 IDE */ 680 IDE_PCI_CLASS_OVERRIDE, 681 apollo_chip_map 682 }, 683 { PCI_PRODUCT_VIATECH_VT6415, /* VIA VT6415 IDE */ 684 IDE_PCI_CLASS_OVERRIDE, 685 apollo_chip_map 686 }, 687 { PCI_PRODUCT_VIATECH_CX700_IDE, /* VIA CX700 IDE */ 688 0, 689 apollo_chip_map 690 }, 691 { PCI_PRODUCT_VIATECH_VX700_IDE, /* VIA VX700 IDE */ 692 0, 693 apollo_chip_map 694 }, 695 { PCI_PRODUCT_VIATECH_VX855_IDE, /* VIA VX855 IDE */ 696 0, 697 apollo_chip_map 698 }, 699 { PCI_PRODUCT_VIATECH_VX900_IDE, /* VIA VX900 IDE */ 700 0, 701 apollo_chip_map 702 }, 703 { PCI_PRODUCT_VIATECH_VT6420_SATA, /* VIA VT6420 SATA */ 704 0, 705 sata_chip_map 706 }, 707 { PCI_PRODUCT_VIATECH_VT6421_SATA, /* VIA VT6421 SATA */ 708 0, 709 sata_chip_map 710 }, 711 { PCI_PRODUCT_VIATECH_VT8237A_SATA, /* VIA VT8237A SATA */ 712 0, 713 sata_chip_map 714 }, 715 { PCI_PRODUCT_VIATECH_VT8237A_SATA_2, /* VIA VT8237A SATA */ 716 0, 717 sata_chip_map 718 }, 719 { PCI_PRODUCT_VIATECH_VT8237S_SATA, /* VIA VT8237S SATA */ 720 0, 721 sata_chip_map 722 }, 723 { PCI_PRODUCT_VIATECH_VT8251_SATA, /* VIA VT8251 SATA */ 724 0, 725 sata_chip_map 726 } 727 }; 728 729 const struct pciide_product_desc pciide_cypress_products[] = { 730 { PCI_PRODUCT_CONTAQ_82C693, /* Contaq CY82C693 IDE */ 731 IDE_16BIT_IOSPACE, 732 cy693_chip_map 733 } 734 }; 735 736 const struct pciide_product_desc pciide_sis_products[] = { 737 { PCI_PRODUCT_SIS_5513, /* SIS 5513 EIDE */ 738 0, 739 sis_chip_map 740 }, 741 { PCI_PRODUCT_SIS_180, /* SIS 180 SATA */ 742 0, 743 sata_chip_map 744 }, 745 { PCI_PRODUCT_SIS_181, /* SIS 181 SATA */ 746 0, 747 sata_chip_map 748 }, 749 { PCI_PRODUCT_SIS_182, /* SIS 182 SATA */ 750 0, 751 sata_chip_map 752 } 753 }; 754 755 /* 756 * The National/AMD CS5535 requires MSRs to set DMA/PIO modes so it 757 * has been banished to the MD i386 pciide_machdep 758 */ 759 const struct pciide_product_desc pciide_natsemi_products[] = { 760 #ifdef __i386__ 761 { PCI_PRODUCT_NS_CS5535_IDE, /* National/AMD CS5535 IDE */ 762 0, 763 gcsc_chip_map 764 }, 765 #endif 766 { PCI_PRODUCT_NS_PC87415, /* National Semi PC87415 IDE */ 767 0, 768 natsemi_chip_map 769 }, 770 { PCI_PRODUCT_NS_SCx200_IDE, /* National Semi SCx200 IDE */ 771 0, 772 ns_scx200_chip_map 773 } 774 }; 775 776 const struct pciide_product_desc pciide_acer_products[] = { 777 { PCI_PRODUCT_ALI_M5229, /* Acer Labs 
M5229 UDMA IDE */ 778 0, 779 acer_chip_map 780 } 781 }; 782 783 const struct pciide_product_desc pciide_triones_products[] = { 784 { PCI_PRODUCT_TRIONES_HPT366, /* Highpoint HPT36x/37x IDE */ 785 IDE_PCI_CLASS_OVERRIDE, 786 hpt_chip_map, 787 }, 788 { PCI_PRODUCT_TRIONES_HPT372A, /* Highpoint HPT372A IDE */ 789 IDE_PCI_CLASS_OVERRIDE, 790 hpt_chip_map 791 }, 792 { PCI_PRODUCT_TRIONES_HPT302, /* Highpoint HPT302 IDE */ 793 IDE_PCI_CLASS_OVERRIDE, 794 hpt_chip_map 795 }, 796 { PCI_PRODUCT_TRIONES_HPT371, /* Highpoint HPT371 IDE */ 797 IDE_PCI_CLASS_OVERRIDE, 798 hpt_chip_map 799 }, 800 { PCI_PRODUCT_TRIONES_HPT374, /* Highpoint HPT374 IDE */ 801 IDE_PCI_CLASS_OVERRIDE, 802 hpt_chip_map 803 } 804 }; 805 806 const struct pciide_product_desc pciide_promise_products[] = { 807 { PCI_PRODUCT_PROMISE_PDC20246, 808 IDE_PCI_CLASS_OVERRIDE, 809 pdc202xx_chip_map, 810 }, 811 { PCI_PRODUCT_PROMISE_PDC20262, 812 IDE_PCI_CLASS_OVERRIDE, 813 pdc202xx_chip_map, 814 }, 815 { PCI_PRODUCT_PROMISE_PDC20265, 816 IDE_PCI_CLASS_OVERRIDE, 817 pdc202xx_chip_map, 818 }, 819 { PCI_PRODUCT_PROMISE_PDC20267, 820 IDE_PCI_CLASS_OVERRIDE, 821 pdc202xx_chip_map, 822 }, 823 { PCI_PRODUCT_PROMISE_PDC20268, 824 IDE_PCI_CLASS_OVERRIDE, 825 pdc202xx_chip_map, 826 }, 827 { PCI_PRODUCT_PROMISE_PDC20268R, 828 IDE_PCI_CLASS_OVERRIDE, 829 pdc202xx_chip_map, 830 }, 831 { PCI_PRODUCT_PROMISE_PDC20269, 832 IDE_PCI_CLASS_OVERRIDE, 833 pdc202xx_chip_map, 834 }, 835 { PCI_PRODUCT_PROMISE_PDC20271, 836 IDE_PCI_CLASS_OVERRIDE, 837 pdc202xx_chip_map, 838 }, 839 { PCI_PRODUCT_PROMISE_PDC20275, 840 IDE_PCI_CLASS_OVERRIDE, 841 pdc202xx_chip_map, 842 }, 843 { PCI_PRODUCT_PROMISE_PDC20276, 844 IDE_PCI_CLASS_OVERRIDE, 845 pdc202xx_chip_map, 846 }, 847 { PCI_PRODUCT_PROMISE_PDC20277, 848 IDE_PCI_CLASS_OVERRIDE, 849 pdc202xx_chip_map, 850 }, 851 { PCI_PRODUCT_PROMISE_PDC20318, 852 IDE_PCI_CLASS_OVERRIDE, 853 pdcsata_chip_map, 854 }, 855 { PCI_PRODUCT_PROMISE_PDC20319, 856 IDE_PCI_CLASS_OVERRIDE, 857 pdcsata_chip_map, 858 }, 859 { PCI_PRODUCT_PROMISE_PDC20371, 860 IDE_PCI_CLASS_OVERRIDE, 861 pdcsata_chip_map, 862 }, 863 { PCI_PRODUCT_PROMISE_PDC20375, 864 IDE_PCI_CLASS_OVERRIDE, 865 pdcsata_chip_map, 866 }, 867 { PCI_PRODUCT_PROMISE_PDC20376, 868 IDE_PCI_CLASS_OVERRIDE, 869 pdcsata_chip_map, 870 }, 871 { PCI_PRODUCT_PROMISE_PDC20377, 872 IDE_PCI_CLASS_OVERRIDE, 873 pdcsata_chip_map, 874 }, 875 { PCI_PRODUCT_PROMISE_PDC20378, 876 IDE_PCI_CLASS_OVERRIDE, 877 pdcsata_chip_map, 878 }, 879 { PCI_PRODUCT_PROMISE_PDC20379, 880 IDE_PCI_CLASS_OVERRIDE, 881 pdcsata_chip_map, 882 }, 883 { PCI_PRODUCT_PROMISE_PDC40518, 884 IDE_PCI_CLASS_OVERRIDE, 885 pdcsata_chip_map, 886 }, 887 { PCI_PRODUCT_PROMISE_PDC40519, 888 IDE_PCI_CLASS_OVERRIDE, 889 pdcsata_chip_map, 890 }, 891 { PCI_PRODUCT_PROMISE_PDC40718, 892 IDE_PCI_CLASS_OVERRIDE, 893 pdcsata_chip_map, 894 }, 895 { PCI_PRODUCT_PROMISE_PDC40719, 896 IDE_PCI_CLASS_OVERRIDE, 897 pdcsata_chip_map, 898 }, 899 { PCI_PRODUCT_PROMISE_PDC40779, 900 IDE_PCI_CLASS_OVERRIDE, 901 pdcsata_chip_map, 902 }, 903 { PCI_PRODUCT_PROMISE_PDC20571, 904 IDE_PCI_CLASS_OVERRIDE, 905 pdcsata_chip_map, 906 }, 907 { PCI_PRODUCT_PROMISE_PDC20575, 908 IDE_PCI_CLASS_OVERRIDE, 909 pdcsata_chip_map, 910 }, 911 { PCI_PRODUCT_PROMISE_PDC20579, 912 IDE_PCI_CLASS_OVERRIDE, 913 pdcsata_chip_map, 914 }, 915 { PCI_PRODUCT_PROMISE_PDC20771, 916 IDE_PCI_CLASS_OVERRIDE, 917 pdcsata_chip_map, 918 }, 919 { PCI_PRODUCT_PROMISE_PDC20775, 920 IDE_PCI_CLASS_OVERRIDE, 921 pdcsata_chip_map, 922 } 923 }; 924 925 const struct pciide_product_desc 
pciide_acard_products[] = { 926 { PCI_PRODUCT_ACARD_ATP850U, /* Acard ATP850U Ultra33 Controller */ 927 IDE_PCI_CLASS_OVERRIDE, 928 acard_chip_map, 929 }, 930 { PCI_PRODUCT_ACARD_ATP860, /* Acard ATP860 Ultra66 Controller */ 931 IDE_PCI_CLASS_OVERRIDE, 932 acard_chip_map, 933 }, 934 { PCI_PRODUCT_ACARD_ATP860A, /* Acard ATP860-A Ultra66 Controller */ 935 IDE_PCI_CLASS_OVERRIDE, 936 acard_chip_map, 937 }, 938 { PCI_PRODUCT_ACARD_ATP865A, /* Acard ATP865-A Ultra133 Controller */ 939 IDE_PCI_CLASS_OVERRIDE, 940 acard_chip_map, 941 }, 942 { PCI_PRODUCT_ACARD_ATP865R, /* Acard ATP865-R Ultra133 Controller */ 943 IDE_PCI_CLASS_OVERRIDE, 944 acard_chip_map, 945 } 946 }; 947 948 const struct pciide_product_desc pciide_serverworks_products[] = { 949 { PCI_PRODUCT_RCC_OSB4_IDE, 950 0, 951 serverworks_chip_map, 952 }, 953 { PCI_PRODUCT_RCC_CSB5_IDE, 954 0, 955 serverworks_chip_map, 956 }, 957 { PCI_PRODUCT_RCC_CSB6_IDE, 958 0, 959 serverworks_chip_map, 960 }, 961 { PCI_PRODUCT_RCC_CSB6_RAID_IDE, 962 0, 963 serverworks_chip_map, 964 }, 965 { PCI_PRODUCT_RCC_HT_1000_IDE, 966 0, 967 serverworks_chip_map, 968 }, 969 { PCI_PRODUCT_RCC_K2_SATA, 970 0, 971 svwsata_chip_map, 972 }, 973 { PCI_PRODUCT_RCC_FRODO4_SATA, 974 0, 975 svwsata_chip_map, 976 }, 977 { PCI_PRODUCT_RCC_FRODO8_SATA, 978 0, 979 svwsata_chip_map, 980 }, 981 { PCI_PRODUCT_RCC_HT_1000_SATA_1, 982 0, 983 svwsata_chip_map, 984 }, 985 { PCI_PRODUCT_RCC_HT_1000_SATA_2, 986 0, 987 svwsata_chip_map, 988 } 989 }; 990 991 const struct pciide_product_desc pciide_nvidia_products[] = { 992 { PCI_PRODUCT_NVIDIA_NFORCE_IDE, 993 0, 994 nforce_chip_map 995 }, 996 { PCI_PRODUCT_NVIDIA_NFORCE2_IDE, 997 0, 998 nforce_chip_map 999 }, 1000 { PCI_PRODUCT_NVIDIA_NFORCE2_400_IDE, 1001 0, 1002 nforce_chip_map 1003 }, 1004 { PCI_PRODUCT_NVIDIA_NFORCE3_IDE, 1005 0, 1006 nforce_chip_map 1007 }, 1008 { PCI_PRODUCT_NVIDIA_NFORCE3_250_IDE, 1009 0, 1010 nforce_chip_map 1011 }, 1012 { PCI_PRODUCT_NVIDIA_NFORCE4_ATA133, 1013 0, 1014 nforce_chip_map 1015 }, 1016 { PCI_PRODUCT_NVIDIA_MCP04_IDE, 1017 0, 1018 nforce_chip_map 1019 }, 1020 { PCI_PRODUCT_NVIDIA_MCP51_IDE, 1021 0, 1022 nforce_chip_map 1023 }, 1024 { PCI_PRODUCT_NVIDIA_MCP55_IDE, 1025 0, 1026 nforce_chip_map 1027 }, 1028 { PCI_PRODUCT_NVIDIA_MCP61_IDE, 1029 0, 1030 nforce_chip_map 1031 }, 1032 { PCI_PRODUCT_NVIDIA_MCP65_IDE, 1033 0, 1034 nforce_chip_map 1035 }, 1036 { PCI_PRODUCT_NVIDIA_MCP67_IDE, 1037 0, 1038 nforce_chip_map 1039 }, 1040 { PCI_PRODUCT_NVIDIA_MCP73_IDE, 1041 0, 1042 nforce_chip_map 1043 }, 1044 { PCI_PRODUCT_NVIDIA_MCP77_IDE, 1045 0, 1046 nforce_chip_map 1047 }, 1048 { PCI_PRODUCT_NVIDIA_NFORCE2_400_SATA, 1049 0, 1050 sata_chip_map 1051 }, 1052 { PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA, 1053 0, 1054 sata_chip_map 1055 }, 1056 { PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA2, 1057 0, 1058 sata_chip_map 1059 }, 1060 { PCI_PRODUCT_NVIDIA_NFORCE4_SATA1, 1061 0, 1062 sata_chip_map 1063 }, 1064 { PCI_PRODUCT_NVIDIA_NFORCE4_SATA2, 1065 0, 1066 sata_chip_map 1067 }, 1068 { PCI_PRODUCT_NVIDIA_MCP04_SATA, 1069 0, 1070 sata_chip_map 1071 }, 1072 { PCI_PRODUCT_NVIDIA_MCP04_SATA2, 1073 0, 1074 sata_chip_map 1075 }, 1076 { PCI_PRODUCT_NVIDIA_MCP51_SATA, 1077 0, 1078 sata_chip_map 1079 }, 1080 { PCI_PRODUCT_NVIDIA_MCP51_SATA2, 1081 0, 1082 sata_chip_map 1083 }, 1084 { PCI_PRODUCT_NVIDIA_MCP55_SATA, 1085 0, 1086 sata_chip_map 1087 }, 1088 { PCI_PRODUCT_NVIDIA_MCP55_SATA2, 1089 0, 1090 sata_chip_map 1091 }, 1092 { PCI_PRODUCT_NVIDIA_MCP61_SATA, 1093 0, 1094 sata_chip_map 1095 }, 1096 { PCI_PRODUCT_NVIDIA_MCP61_SATA2, 1097 0, 1098 
sata_chip_map 1099 }, 1100 { PCI_PRODUCT_NVIDIA_MCP61_SATA3, 1101 0, 1102 sata_chip_map 1103 }, 1104 { PCI_PRODUCT_NVIDIA_MCP65_SATA, 1105 0, 1106 sata_chip_map 1107 }, 1108 { PCI_PRODUCT_NVIDIA_MCP65_SATA2, 1109 0, 1110 sata_chip_map 1111 }, 1112 { PCI_PRODUCT_NVIDIA_MCP65_SATA3, 1113 0, 1114 sata_chip_map 1115 }, 1116 { PCI_PRODUCT_NVIDIA_MCP65_SATA4, 1117 0, 1118 sata_chip_map 1119 }, 1120 { PCI_PRODUCT_NVIDIA_MCP67_SATA, 1121 0, 1122 sata_chip_map 1123 }, 1124 { PCI_PRODUCT_NVIDIA_MCP67_SATA2, 1125 0, 1126 sata_chip_map 1127 }, 1128 { PCI_PRODUCT_NVIDIA_MCP67_SATA3, 1129 0, 1130 sata_chip_map 1131 }, 1132 { PCI_PRODUCT_NVIDIA_MCP67_SATA4, 1133 0, 1134 sata_chip_map 1135 }, 1136 { PCI_PRODUCT_NVIDIA_MCP77_SATA_1, 1137 0, 1138 sata_chip_map 1139 }, 1140 { PCI_PRODUCT_NVIDIA_MCP79_SATA_1, 1141 0, 1142 sata_chip_map 1143 }, 1144 { PCI_PRODUCT_NVIDIA_MCP79_SATA_2, 1145 0, 1146 sata_chip_map 1147 }, 1148 { PCI_PRODUCT_NVIDIA_MCP79_SATA_3, 1149 0, 1150 sata_chip_map 1151 }, 1152 { PCI_PRODUCT_NVIDIA_MCP79_SATA_4, 1153 0, 1154 sata_chip_map 1155 }, 1156 { PCI_PRODUCT_NVIDIA_MCP89_SATA_1, 1157 0, 1158 sata_chip_map 1159 }, 1160 { PCI_PRODUCT_NVIDIA_MCP89_SATA_2, 1161 0, 1162 sata_chip_map 1163 }, 1164 { PCI_PRODUCT_NVIDIA_MCP89_SATA_3, 1165 0, 1166 sata_chip_map 1167 }, 1168 { PCI_PRODUCT_NVIDIA_MCP89_SATA_4, 1169 0, 1170 sata_chip_map 1171 } 1172 }; 1173 1174 const struct pciide_product_desc pciide_ite_products[] = { 1175 { PCI_PRODUCT_ITEXPRESS_IT8211F, 1176 IDE_PCI_CLASS_OVERRIDE, 1177 ite_chip_map 1178 }, 1179 { PCI_PRODUCT_ITEXPRESS_IT8212F, 1180 IDE_PCI_CLASS_OVERRIDE, 1181 ite_chip_map 1182 } 1183 }; 1184 1185 const struct pciide_product_desc pciide_ati_products[] = { 1186 { PCI_PRODUCT_ATI_SB200_IDE, 1187 0, 1188 ixp_chip_map 1189 }, 1190 { PCI_PRODUCT_ATI_SB300_IDE, 1191 0, 1192 ixp_chip_map 1193 }, 1194 { PCI_PRODUCT_ATI_SB400_IDE, 1195 0, 1196 ixp_chip_map 1197 }, 1198 { PCI_PRODUCT_ATI_SB600_IDE, 1199 0, 1200 ixp_chip_map 1201 }, 1202 { PCI_PRODUCT_ATI_SB700_IDE, 1203 0, 1204 ixp_chip_map 1205 }, 1206 { PCI_PRODUCT_ATI_SB300_SATA, 1207 0, 1208 sii3112_chip_map 1209 }, 1210 { PCI_PRODUCT_ATI_SB400_SATA_1, 1211 0, 1212 sii3112_chip_map 1213 }, 1214 { PCI_PRODUCT_ATI_SB400_SATA_2, 1215 0, 1216 sii3112_chip_map 1217 } 1218 }; 1219 1220 const struct pciide_product_desc pciide_jmicron_products[] = { 1221 { PCI_PRODUCT_JMICRON_JMB361, 1222 0, 1223 jmicron_chip_map 1224 }, 1225 { PCI_PRODUCT_JMICRON_JMB363, 1226 0, 1227 jmicron_chip_map 1228 }, 1229 { PCI_PRODUCT_JMICRON_JMB365, 1230 0, 1231 jmicron_chip_map 1232 }, 1233 { PCI_PRODUCT_JMICRON_JMB366, 1234 0, 1235 jmicron_chip_map 1236 }, 1237 { PCI_PRODUCT_JMICRON_JMB368, 1238 0, 1239 jmicron_chip_map 1240 } 1241 }; 1242 1243 const struct pciide_product_desc pciide_phison_products[] = { 1244 { PCI_PRODUCT_PHISON_PS5000, 1245 0, 1246 phison_chip_map 1247 }, 1248 }; 1249 1250 struct pciide_vendor_desc { 1251 u_int32_t ide_vendor; 1252 const struct pciide_product_desc *ide_products; 1253 int ide_nproducts; 1254 }; 1255 1256 const struct pciide_vendor_desc pciide_vendors[] = { 1257 { PCI_VENDOR_INTEL, pciide_intel_products, 1258 nitems(pciide_intel_products) }, 1259 { PCI_VENDOR_AMD, pciide_amd_products, 1260 nitems(pciide_amd_products) }, 1261 #ifdef notyet 1262 { PCI_VENDOR_OPTI, pciide_opti_products, 1263 nitems(pciide_opti_products) }, 1264 #endif 1265 { PCI_VENDOR_CMDTECH, pciide_cmd_products, 1266 nitems(pciide_cmd_products) }, 1267 { PCI_VENDOR_VIATECH, pciide_via_products, 1268 nitems(pciide_via_products) }, 1269 { PCI_VENDOR_CONTAQ, 
	    pciide_cypress_products,
	    nitems(pciide_cypress_products) },
	{ PCI_VENDOR_SIS, pciide_sis_products,
	  nitems(pciide_sis_products) },
	{ PCI_VENDOR_NS, pciide_natsemi_products,
	  nitems(pciide_natsemi_products) },
	{ PCI_VENDOR_ALI, pciide_acer_products,
	  nitems(pciide_acer_products) },
	{ PCI_VENDOR_TRIONES, pciide_triones_products,
	  nitems(pciide_triones_products) },
	{ PCI_VENDOR_ACARD, pciide_acard_products,
	  nitems(pciide_acard_products) },
	{ PCI_VENDOR_RCC, pciide_serverworks_products,
	  nitems(pciide_serverworks_products) },
	{ PCI_VENDOR_PROMISE, pciide_promise_products,
	  nitems(pciide_promise_products) },
	{ PCI_VENDOR_NVIDIA, pciide_nvidia_products,
	  nitems(pciide_nvidia_products) },
	{ PCI_VENDOR_ITEXPRESS, pciide_ite_products,
	  nitems(pciide_ite_products) },
	{ PCI_VENDOR_ATI, pciide_ati_products,
	  nitems(pciide_ati_products) },
	{ PCI_VENDOR_JMICRON, pciide_jmicron_products,
	  nitems(pciide_jmicron_products) },
	{ PCI_VENDOR_PHISON, pciide_phison_products,
	  nitems(pciide_phison_products) }
};

/* options passed via the 'flags' config keyword */
#define	PCIIDE_OPTIONS_DMA	0x01

int	pciide_match(struct device *, void *, void *);
void	pciide_attach(struct device *, struct device *, void *);
int	pciide_detach(struct device *, int);
int	pciide_activate(struct device *, int);

struct cfattach pciide_pci_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach,
	pciide_detach, pciide_activate
};

struct cfattach pciide_jmb_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach,
	pciide_detach, pciide_activate
};

struct cfdriver pciide_cd = {
	NULL, "pciide", DV_DULL
};

const struct pciide_product_desc *pciide_lookup_product(u_int32_t);

const struct pciide_product_desc *
pciide_lookup_product(u_int32_t id)
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;
	int i;

	for (i = 0, vp = pciide_vendors; i < nitems(pciide_vendors); vp++, i++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if (i == nitems(pciide_vendors))
		return (NULL);

	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (i == vp->ide_nproducts)
		return (NULL);
	return (pp);
}

int
pciide_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Some IDE controllers have severe bugs when used in PCI mode.
	 * We punt and attach them to the ISA bus instead.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
		return (0);

	/*
	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
	 * controllers.  Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE))
		return (1);

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
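	 * (Per the PCI IDE specification the programming-interface byte's
	 * bits 0 and 2 select native vs. compatibility mode for the primary
	 * and secondary channel, and bit 7 advertises bus-master DMA
	 * capability; both register layouts are handled further below.)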
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE) {
		switch (PCI_SUBCLASS(pa->pa_class)) {
		case PCI_SUBCLASS_MASS_STORAGE_IDE:
			return (1);

		/*
		 * We only match these if we know they have
		 * a match, as we may not support native interfaces
		 * on them.
		 */
		case PCI_SUBCLASS_MASS_STORAGE_SATA:
		case PCI_SUBCLASS_MASS_STORAGE_RAID:
		case PCI_SUBCLASS_MASS_STORAGE_MISC:
			if (pp)
				return (1);
			else
				return (0);
			break;
		}
	}

	return (0);
}

void
pciide_attach(struct device *parent, struct device *self, void *aux)
{
	struct pciide_softc *sc = (struct pciide_softc *)self;
	struct pci_attach_args *pa = aux;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL)
		sc->sc_pp = &default_product_desc;
	sc->sc_rev = PCI_REVISION(pa->pa_class);

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Set up DMA defaults; these might be adjusted by chip_map. */
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;

	sc->sc_dmacmd_read = pciide_dmacmd_read;
	sc->sc_dmacmd_write = pciide_dmacmd_write;
	sc->sc_dmactl_read = pciide_dmactl_read;
	sc->sc_dmactl_write = pciide_dmactl_write;
	sc->sc_dmatbl_write = pciide_dmatbl_write;

	WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc,
	    sc->sc_tag, pa->pa_class), DEBUG_PROBE);

	sc->sc_pp->chip_map(sc, pa);

	WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG)),
	    DEBUG_PROBE);
}

int
pciide_detach(struct device *self, int flags)
{
	struct pciide_softc *sc = (struct pciide_softc *)self;

	if (sc->chip_unmap == NULL)
		panic("unmap not yet implemented for this chipset");
	else
		sc->chip_unmap(sc, flags);

	return 0;
}

int
pciide_activate(struct device *self, int act)
{
	int rv = 0;
	struct pciide_softc *sc = (struct pciide_softc *)self;
	int i;

	switch (act) {
	case DVACT_QUIESCE:
		rv = config_activate_children(self, act);
		break;
	case DVACT_SUSPEND:
		rv = config_activate_children(self, act);

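		/*
		 * Save the config registers just above the standard BARs
		 * (PCI_MAPREG_END + 0x18, i.e. 0x40 and up); most chip maps
		 * keep their timing setup in that window, and the chips that
		 * don't are handled case by case below.
		 */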
		for (i = 0; i < nitems(sc->sc_save); i++)
			sc->sc_save[i] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, PCI_MAPREG_END + 0x18 + (i * 4));

		if (sc->sc_pp->chip_map == sch_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SCH_D0TIM);
			sc->sc_save2[1] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SCH_D1TIM);
		} else if (sc->sc_pp->chip_map == piixsata_chip_map) {
			sc->sc_save2[0] = pciide_pci_read(sc->sc_pc,
			    sc->sc_tag, ICH5_SATA_MAP);
			sc->sc_save2[1] = pciide_pci_read(sc->sc_pc,
			    sc->sc_tag, ICH5_SATA_PI);
			sc->sc_save2[2] = pciide_pci_read(sc->sc_pc,
			    sc->sc_tag, ICH_SATA_PCS);
		} else if (sc->sc_pp->chip_map == sii3112_chip_map) {
			sc->sc_save[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SII3112_SCS_CMD);
			sc->sc_save[1] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SII3112_PCI_CFGCTL);
		} else if (sc->sc_pp->chip_map == ite_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, IT_TIM(0));
		} else if (sc->sc_pp->chip_map == nforce_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, NFORCE_PIODMATIM);
			sc->sc_save2[1] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, NFORCE_PIOTIM);
			sc->sc_save2[2] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, NFORCE_UDMATIM);
		}
		break;
	case DVACT_RESUME:
		for (i = 0; i < nitems(sc->sc_save); i++)
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PCI_MAPREG_END + 0x18 + (i * 4),
			    sc->sc_save[i]);

		if (sc->sc_pp->chip_map == default_chip_map ||
		    sc->sc_pp->chip_map == sata_chip_map ||
		    sc->sc_pp->chip_map == piix_chip_map ||
		    sc->sc_pp->chip_map == amd756_chip_map ||
		    sc->sc_pp->chip_map == phison_chip_map ||
		    sc->sc_pp->chip_map == ixp_chip_map ||
		    sc->sc_pp->chip_map == acard_chip_map ||
		    sc->sc_pp->chip_map == default_chip_map ||
		    sc->sc_pp->chip_map == apollo_chip_map ||
		    sc->sc_pp->chip_map == sis_chip_map) {
			/* nothing to restore -- uses only 0x40 - 0x56 */
		} else if (sc->sc_pp->chip_map == sch_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SCH_D0TIM, sc->sc_save2[0]);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SCH_D1TIM, sc->sc_save2[1]);
		} else if (sc->sc_pp->chip_map == piixsata_chip_map) {
			pciide_pci_write(sc->sc_pc, sc->sc_tag,
			    ICH5_SATA_MAP, sc->sc_save2[0]);
			pciide_pci_write(sc->sc_pc, sc->sc_tag,
			    ICH5_SATA_PI, sc->sc_save2[1]);
			pciide_pci_write(sc->sc_pc, sc->sc_tag,
			    ICH_SATA_PCS, sc->sc_save2[2]);
		} else if (sc->sc_pp->chip_map == sii3112_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SII3112_SCS_CMD, sc->sc_save[0]);
			delay(50 * 1000);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SII3112_PCI_CFGCTL, sc->sc_save[1]);
			delay(50 * 1000);
		} else if (sc->sc_pp->chip_map == ite_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    IT_TIM(0), sc->sc_save2[0]);
		} else if (sc->sc_pp->chip_map == nforce_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    NFORCE_PIODMATIM, sc->sc_save2[0]);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    NFORCE_PIOTIM, sc->sc_save2[1]);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    NFORCE_UDMATIM, sc->sc_save2[2]);
		} else {
			printf("%s: restore for unknown chip map %x\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    sc->sc_pp->ide_product);
		}

		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

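/*
 * Map a compatibility-mode channel.  The register blocks live at the
 * fixed legacy addresses (0x1f0/0x3f6 for the primary channel,
 * 0x170/0x376 for the secondary) rather than behind BARs, and the
 * interrupt is hooked up separately by the machine-dependent
 * compatibility-interrupt code.
 */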
int
pciide_mapregs_compat(struct pci_attach_args *pa, struct pciide_channel *cp,
    int compatchan, bus_size_t *cmdsizep, bus_size_t *ctlsizep)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	pcireg_t csr;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MASTER_ENABLE);

	wdc_cp->cmd_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}
	wdc_cp->cmd_iosz = *cmdsizep;
	wdc_cp->ctl_iosz = *ctlsizep;

	return (1);
}

int
pciide_unmapregs_compat(struct pciide_softc *sc, struct pciide_channel *cp)
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz);
	bus_space_unmap(wdc_cp->ctl_iot, wdc_cp->ctl_ioh, wdc_cp->ctl_iosz);

	if (sc->sc_pci_ih != NULL) {
		pciide_machdep_compat_intr_disestablish(sc->sc_pc,
		    sc->sc_pci_ih);
		sc->sc_pci_ih = NULL;
	}

	return (0);
}

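/*
 * Map a native-mode channel.  Here the command and control blocks are
 * found through the channel's own PCI BARs, and both channels share the
 * device's single native-PCI interrupt, so sc_pci_ih is established once
 * and then reused.
 */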
int
pciide_mapregs_native(struct pci_attach_args *pa, struct pciide_channel *cp,
    bus_size_t *cmdsizep, bus_size_t *ctlsizep, int (*pci_intr)(void *))
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;
	pcireg_t maptype;

	cp->compat = 0;

	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return (0);
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc,
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return (0);
		}
	}
	cp->ih = sc->sc_pci_ih;
	sc->sc_pc = pa->pa_pc;

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CMD_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CTL_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O": "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return (0);
	}
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register, the control register is at offset 2.  Pass the generic
	 * code a handle for only one byte at the right offset.
	 */
	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdc_cp->ctl_ioh) != 0) {
		printf("%s: unable to subregion %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
		return (0);
	}
	wdc_cp->cmd_iosz = *cmdsizep;
	wdc_cp->ctl_iosz = *ctlsizep;

	return (1);
}

int
pciide_unmapregs_native(struct pciide_softc *sc, struct pciide_channel *cp)
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz);

	/* Unmap the whole control space, not just the sub-region */
	bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, wdc_cp->ctl_iosz);

	if (sc->sc_pci_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih);
		sc->sc_pci_ih = NULL;
	}

	return (0);
}

void
pciide_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done.  If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space", some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
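	 * XXX That quirk is why the switch below accepts either an I/O
	 * XXX or a 32-bit memory mapping for this BAR.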
	 */

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", unused (registers at unsafe address %#lx)", addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, &sc->sc_dma_iosz,
		    0) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", (unsupported maptype 0x%x)", maptype);
		break;
	}
}

void
pciide_unmapreg_dma(struct pciide_softc *sc)
{
	bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, sc->sc_dma_iosz);
}

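/*
 * Decide whether a (possibly shared) interrupt is ours: returns 1 if the
 * bus-master status register shows an interrupt while DMA is in progress
 * on the channel, 0 if it clearly isn't ours, and -1 if no DMA is pending
 * so the status register can't tell us anything.
 */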
int
pciide_intr_flag(struct pciide_channel *cp)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int chan = cp->wdc_channel.channel;

	if (cp->dma_in_progress) {
		int retry = 10;
		int status;

		/* Check the status register */
		for (retry = 10; retry > 0; retry--) {
			status = PCIIDE_DMACTL_READ(sc, chan);
			if (status & IDEDMA_CTL_INTR) {
				break;
			}
			DELAY(5);
		}

		/* Not for us. */
		if (retry == 0)
			return (0);

		return (1);
	}

	return (-1);
}

int
pciide_compat_intr(void *arg)
{
	struct pciide_channel *cp = arg;

	if (pciide_intr_flag(cp) == 0)
		return (0);

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}

int
pciide_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel skip. */
		if (cp->compat)
			continue;

		if (pciide_intr_flag(cp) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

u_int8_t
pciide_dmacmd_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan)));
}

void
pciide_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan), val);
}

u_int8_t
pciide_dmactl_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan)));
}

void
pciide_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan), val);
}

void
pciide_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val)
{
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL(chan), val);
}

void
pciide_channel_dma_setup(struct pciide_channel *cp)
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

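/*
 * Allocate the bus-master descriptor table for one drive.  Each
 * idedma_table entry holds a 32-bit physical base address and a byte
 * count; pciide_dma_init() fills the entries per transfer and marks the
 * last one with IDEDMA_BYTE_COUNT_EOT.
 */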
dma_table_size, 1954 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 1955 &dma_maps->dmamap_table)) != 0) { 1956 printf("%s:%d: unable to create table DMA map for " 1957 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1958 channel, drive, error); 1959 return (error); 1960 } 1961 if ((error = bus_dmamap_load(sc->sc_dmat, 1962 dma_maps->dmamap_table, 1963 dma_maps->dma_table, 1964 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 1965 printf("%s:%d: unable to load table DMA map for " 1966 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1967 channel, drive, error); 1968 return (error); 1969 } 1970 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 1971 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE); 1972 /* Create a xfer DMA map for this drive */ 1973 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 1974 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary, 1975 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1976 &dma_maps->dmamap_xfer)) != 0) { 1977 printf("%s:%d: unable to create xfer DMA map for " 1978 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1979 channel, drive, error); 1980 return (error); 1981 } 1982 return (0); 1983 } 1984 1985 int 1986 pciide_dma_init(void *v, int channel, int drive, void *databuf, 1987 size_t datalen, int flags) 1988 { 1989 struct pciide_softc *sc = v; 1990 int error, seg; 1991 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1992 struct pciide_dma_maps *dma_maps = 1993 &sc->pciide_channels[channel].dma_maps[drive]; 1994 #ifndef BUS_DMA_RAW 1995 #define BUS_DMA_RAW 0 1996 #endif 1997 1998 error = bus_dmamap_load(sc->sc_dmat, 1999 dma_maps->dmamap_xfer, 2000 databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW); 2001 if (error) { 2002 printf("%s:%d: unable to load xfer DMA map for " 2003 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2004 channel, drive, error); 2005 return (error); 2006 } 2007 2008 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 2009 dma_maps->dmamap_xfer->dm_mapsize, 2010 (flags & WDC_DMA_READ) ? 2011 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2012 2013 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 2014 #ifdef DIAGNOSTIC 2015 /* A segment must not cross a 64k boundary */ 2016 { 2017 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 2018 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 2019 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 2020 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 2021 printf("pciide_dma: segment %d physical addr 0x%lx" 2022 " len 0x%lx not properly aligned\n", 2023 seg, phys, len); 2024 panic("pciide_dma: buf align"); 2025 } 2026 } 2027 #endif 2028 dma_maps->dma_table[seg].base_addr = 2029 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 2030 dma_maps->dma_table[seg].byte_count = 2031 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 2032 IDEDMA_BYTE_COUNT_MASK); 2033 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 2034 seg, letoh32(dma_maps->dma_table[seg].byte_count), 2035 letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 2036 2037 } 2038 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 2039 htole32(IDEDMA_BYTE_COUNT_EOT); 2040 2041 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 2042 dma_maps->dmamap_table->dm_mapsize, 2043 BUS_DMASYNC_PREWRITE); 2044 2045 /* Maps are ready. 
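 * Each idedma_table slot now holds the physical address and byte
 * count of one segment, and IDEDMA_BYTE_COUNT_EOT flags the final
 * entry of the list.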
Start DMA function */ 2046 #ifdef DIAGNOSTIC 2047 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 2048 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 2049 dma_maps->dmamap_table->dm_segs[0].ds_addr); 2050 panic("pciide_dma_init: table align"); 2051 } 2052 #endif 2053 2054 /* Clear status bits */ 2055 PCIIDE_DMACTL_WRITE(sc, channel, PCIIDE_DMACTL_READ(sc, channel)); 2056 /* Write table addr */ 2057 PCIIDE_DMATBL_WRITE(sc, channel, 2058 dma_maps->dmamap_table->dm_segs[0].ds_addr); 2059 /* set read/write */ 2060 PCIIDE_DMACMD_WRITE(sc, channel, 2061 ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd); 2062 /* remember flags */ 2063 dma_maps->dma_flags = flags; 2064 return (0); 2065 } 2066 2067 void 2068 pciide_dma_start(void *v, int channel, int drive) 2069 { 2070 struct pciide_softc *sc = v; 2071 2072 WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS); 2073 PCIIDE_DMACMD_WRITE(sc, channel, PCIIDE_DMACMD_READ(sc, channel) | 2074 IDEDMA_CMD_START); 2075 2076 sc->pciide_channels[channel].dma_in_progress = 1; 2077 } 2078 2079 int 2080 pciide_dma_finish(void *v, int channel, int drive, int force) 2081 { 2082 struct pciide_softc *sc = v; 2083 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2084 u_int8_t status; 2085 int error = 0; 2086 struct pciide_dma_maps *dma_maps = 2087 &sc->pciide_channels[channel].dma_maps[drive]; 2088 2089 status = PCIIDE_DMACTL_READ(sc, channel); 2090 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 2091 DEBUG_XFERS); 2092 if (status == 0xff) 2093 return (status); 2094 2095 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) { 2096 error = WDC_DMAST_NOIRQ; 2097 goto done; 2098 } 2099 2100 /* stop DMA channel */ 2101 PCIIDE_DMACMD_WRITE(sc, channel, 2102 ((dma_maps->dma_flags & WDC_DMA_READ) ? 2103 0x00 : IDEDMA_CMD_WRITE) | cp->idedma_cmd); 2104 2105 /* Unload the map of the data buffer */ 2106 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 2107 dma_maps->dmamap_xfer->dm_mapsize, 2108 (dma_maps->dma_flags & WDC_DMA_READ) ? 
2109 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 2110 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 2111 2112 /* Clear status bits */ 2113 PCIIDE_DMACTL_WRITE(sc, channel, status); 2114 2115 if ((status & IDEDMA_CTL_ERR) != 0) { 2116 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 2117 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 2118 error |= WDC_DMAST_ERR; 2119 } 2120 2121 if ((status & IDEDMA_CTL_INTR) == 0) { 2122 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 2123 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 2124 drive, status); 2125 error |= WDC_DMAST_NOIRQ; 2126 } 2127 2128 if ((status & IDEDMA_CTL_ACT) != 0) { 2129 /* data underrun, may be a valid condition for ATAPI */ 2130 error |= WDC_DMAST_UNDER; 2131 } 2132 2133 done: 2134 sc->pciide_channels[channel].dma_in_progress = 0; 2135 return (error); 2136 } 2137 2138 void 2139 pciide_irqack(struct channel_softc *chp) 2140 { 2141 struct pciide_channel *cp = (struct pciide_channel *)chp; 2142 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2143 int chan = chp->channel; 2144 2145 /* clear status bits in IDE DMA registers */ 2146 PCIIDE_DMACTL_WRITE(sc, chan, PCIIDE_DMACTL_READ(sc, chan)); 2147 } 2148 2149 /* some common code used by several chip_map */ 2150 int 2151 pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface) 2152 { 2153 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2154 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2155 cp->name = PCIIDE_CHANNEL_NAME(channel); 2156 cp->wdc_channel.channel = channel; 2157 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2158 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 2159 if (cp->wdc_channel.ch_queue == NULL) { 2160 printf("%s: %s " 2161 "cannot allocate channel queue", 2162 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2163 return (0); 2164 } 2165 cp->hw_ok = 1; 2166 2167 return (1); 2168 } 2169 2170 void 2171 pciide_chanfree(struct pciide_softc *sc, int channel) 2172 { 2173 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2174 if (cp->wdc_channel.ch_queue) 2175 wdc_free_queue(cp->wdc_channel.ch_queue); 2176 } 2177 2178 /* some common code used by several chip channel_map */ 2179 void 2180 pciide_mapchan(struct pci_attach_args *pa, struct pciide_channel *cp, 2181 pcireg_t interface, bus_size_t *cmdsizep, bus_size_t *ctlsizep, 2182 int (*pci_intr)(void *)) 2183 { 2184 struct channel_softc *wdc_cp = &cp->wdc_channel; 2185 2186 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 2187 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 2188 pci_intr); 2189 else 2190 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2191 wdc_cp->channel, cmdsizep, ctlsizep); 2192 if (cp->hw_ok == 0) 2193 return; 2194 wdc_cp->data32iot = wdc_cp->cmd_iot; 2195 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 2196 wdcattach(wdc_cp); 2197 } 2198 2199 void 2200 pciide_unmap_chan(struct pciide_softc *sc, struct pciide_channel *cp, int flags) 2201 { 2202 struct channel_softc *wdc_cp = &cp->wdc_channel; 2203 2204 wdcdetach(wdc_cp, flags); 2205 2206 if (cp->compat != 0) 2207 pciide_unmapregs_compat(sc, cp); 2208 else 2209 pciide_unmapregs_native(sc, cp); 2210 } 2211 2212 /* 2213 * Generic code to call to know if a channel can be disabled. 
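 * A channel qualifies when neither of its drives answered the probe.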
Return 1 2214 * if channel can be disabled, 0 if not 2215 */ 2216 int 2217 pciide_chan_candisable(struct pciide_channel *cp) 2218 { 2219 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2220 struct channel_softc *wdc_cp = &cp->wdc_channel; 2221 2222 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 2223 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 2224 printf("%s: %s disabled (no drives)\n", 2225 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2226 cp->hw_ok = 0; 2227 return (1); 2228 } 2229 return (0); 2230 } 2231 2232 /* 2233 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 2234 * Set hw_ok=0 on failure 2235 */ 2236 void 2237 pciide_map_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2238 int compatchan, int interface) 2239 { 2240 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2241 struct channel_softc *wdc_cp = &cp->wdc_channel; 2242 2243 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2244 return; 2245 2246 cp->compat = 1; 2247 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 2248 pa, compatchan, pciide_compat_intr, cp); 2249 if (cp->ih == NULL) { 2250 printf("%s: no compatibility interrupt for use by %s\n", 2251 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2252 cp->hw_ok = 0; 2253 } 2254 } 2255 2256 /* 2257 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel. 2258 * Set hw_ok=0 on failure 2259 */ 2260 void 2261 pciide_unmap_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2262 int compatchan, int interface) 2263 { 2264 struct channel_softc *wdc_cp = &cp->wdc_channel; 2265 2266 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2267 return; 2268 2269 pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih); 2270 } 2271 2272 void 2273 pciide_print_channels(int nchannels, pcireg_t interface) 2274 { 2275 int i; 2276 2277 for (i = 0; i < nchannels; i++) { 2278 printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i), 2279 (interface & PCIIDE_INTERFACE_SETTABLE(i)) ? 2280 "configured" : "wired", 2281 (interface & PCIIDE_INTERFACE_PCI(i)) ? 
"native-PCI" : 2282 "compatibility"); 2283 } 2284 2285 printf("\n"); 2286 } 2287 2288 void 2289 pciide_print_modes(struct pciide_channel *cp) 2290 { 2291 wdc_print_current_modes(&cp->wdc_channel); 2292 } 2293 2294 void 2295 default_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2296 { 2297 struct pciide_channel *cp; 2298 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2299 pcireg_t csr; 2300 int channel, drive; 2301 struct ata_drive_datas *drvp; 2302 u_int8_t idedma_ctl; 2303 bus_size_t cmdsize, ctlsize; 2304 char *failreason; 2305 2306 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2307 printf(": DMA"); 2308 if (sc->sc_pp == &default_product_desc && 2309 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 2310 PCIIDE_OPTIONS_DMA) == 0) { 2311 printf(" (unsupported)"); 2312 sc->sc_dma_ok = 0; 2313 } else { 2314 pciide_mapreg_dma(sc, pa); 2315 if (sc->sc_dma_ok != 0) 2316 printf(", (partial support)"); 2317 } 2318 } else { 2319 printf(": no DMA"); 2320 sc->sc_dma_ok = 0; 2321 } 2322 if (sc->sc_dma_ok) { 2323 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2324 sc->sc_wdcdev.irqack = pciide_irqack; 2325 } 2326 sc->sc_wdcdev.PIO_cap = 0; 2327 sc->sc_wdcdev.DMA_cap = 0; 2328 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2329 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2330 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 2331 2332 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2333 2334 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2335 cp = &sc->pciide_channels[channel]; 2336 if (pciide_chansetup(sc, channel, interface) == 0) 2337 continue; 2338 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 2339 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2340 &ctlsize, pciide_pci_intr); 2341 } else { 2342 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2343 channel, &cmdsize, &ctlsize); 2344 } 2345 if (cp->hw_ok == 0) 2346 continue; 2347 /* 2348 * Check to see if something appears to be there. 2349 */ 2350 failreason = NULL; 2351 pciide_map_compat_intr(pa, cp, channel, interface); 2352 if (cp->hw_ok == 0) 2353 continue; 2354 if (!wdcprobe(&cp->wdc_channel)) { 2355 failreason = "not responding; disabled or no drives?"; 2356 goto next; 2357 } 2358 /* 2359 * Now, make sure it's actually attributable to this PCI IDE 2360 * channel by trying to access the channel again while the 2361 * PCI IDE controller's I/O space is disabled. (If the 2362 * channel no longer appears to be there, it belongs to 2363 * this controller.) YUCK! 
2364 */ 2365 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 2366 PCI_COMMAND_STATUS_REG); 2367 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 2368 csr & ~PCI_COMMAND_IO_ENABLE); 2369 if (wdcprobe(&cp->wdc_channel)) 2370 failreason = "other hardware responding at addresses"; 2371 pci_conf_write(sc->sc_pc, sc->sc_tag, 2372 PCI_COMMAND_STATUS_REG, csr); 2373 next: 2374 if (failreason) { 2375 printf("%s: %s ignored (%s)\n", 2376 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2377 failreason); 2378 cp->hw_ok = 0; 2379 pciide_unmap_compat_intr(pa, cp, channel, interface); 2380 bus_space_unmap(cp->wdc_channel.cmd_iot, 2381 cp->wdc_channel.cmd_ioh, cmdsize); 2382 if (interface & PCIIDE_INTERFACE_PCI(channel)) 2383 bus_space_unmap(cp->wdc_channel.ctl_iot, 2384 cp->ctl_baseioh, ctlsize); 2385 else 2386 bus_space_unmap(cp->wdc_channel.ctl_iot, 2387 cp->wdc_channel.ctl_ioh, ctlsize); 2388 } 2389 if (cp->hw_ok) { 2390 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2391 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2392 wdcattach(&cp->wdc_channel); 2393 } 2394 } 2395 2396 if (sc->sc_dma_ok == 0) 2397 return; 2398 2399 /* Allocate DMA maps */ 2400 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2401 idedma_ctl = 0; 2402 cp = &sc->pciide_channels[channel]; 2403 for (drive = 0; drive < 2; drive++) { 2404 drvp = &cp->wdc_channel.ch_drive[drive]; 2405 /* If no drive, skip */ 2406 if ((drvp->drive_flags & DRIVE) == 0) 2407 continue; 2408 if ((drvp->drive_flags & DRIVE_DMA) == 0) 2409 continue; 2410 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 2411 /* Abort DMA setup */ 2412 printf("%s:%d:%d: cannot allocate DMA maps, " 2413 "using PIO transfers\n", 2414 sc->sc_wdcdev.sc_dev.dv_xname, 2415 channel, drive); 2416 drvp->drive_flags &= ~DRIVE_DMA; 2417 } 2418 printf("%s:%d:%d: using DMA data transfers\n", 2419 sc->sc_wdcdev.sc_dev.dv_xname, 2420 channel, drive); 2421 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2422 } 2423 if (idedma_ctl != 0) { 2424 /* Add software bits in status register */ 2425 PCIIDE_DMACTL_WRITE(sc, channel, idedma_ctl); 2426 } 2427 } 2428 } 2429 2430 void 2431 default_chip_unmap(struct pciide_softc *sc, int flags) 2432 { 2433 struct pciide_channel *cp; 2434 int channel; 2435 2436 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2437 cp = &sc->pciide_channels[channel]; 2438 pciide_unmap_chan(sc, cp, flags); 2439 pciide_chanfree(sc, channel); 2440 } 2441 2442 pciide_unmapreg_dma(sc); 2443 2444 if (sc->sc_cookie) 2445 free(sc->sc_cookie, M_DEVBUF); 2446 } 2447 2448 void 2449 sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2450 { 2451 struct pciide_channel *cp; 2452 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2453 int channel; 2454 bus_size_t cmdsize, ctlsize; 2455 2456 if (interface == 0) { 2457 WDCDEBUG_PRINT(("sata_chip_map interface == 0\n"), 2458 DEBUG_PROBE); 2459 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2460 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 2461 } 2462 2463 printf(": DMA"); 2464 pciide_mapreg_dma(sc, pa); 2465 printf("\n"); 2466 2467 if (sc->sc_dma_ok) { 2468 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2469 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2470 sc->sc_wdcdev.irqack = pciide_irqack; 2471 } 2472 sc->sc_wdcdev.PIO_cap = 4; 2473 sc->sc_wdcdev.DMA_cap = 2; 2474 sc->sc_wdcdev.UDMA_cap = 6; 2475 2476 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2477 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2478 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2479 
WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2480 sc->sc_wdcdev.set_modes = sata_setup_channel; 2481 sc->chip_unmap = default_chip_unmap; 2482 2483 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2484 cp = &sc->pciide_channels[channel]; 2485 if (pciide_chansetup(sc, channel, interface) == 0) 2486 continue; 2487 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2488 pciide_pci_intr); 2489 sata_setup_channel(&cp->wdc_channel); 2490 } 2491 } 2492 2493 void 2494 sata_setup_channel(struct channel_softc *chp) 2495 { 2496 struct ata_drive_datas *drvp; 2497 int drive; 2498 u_int32_t idedma_ctl; 2499 struct pciide_channel *cp = (struct pciide_channel *)chp; 2500 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2501 2502 /* setup DMA if needed */ 2503 pciide_channel_dma_setup(cp); 2504 2505 idedma_ctl = 0; 2506 2507 for (drive = 0; drive < 2; drive++) { 2508 drvp = &chp->ch_drive[drive]; 2509 /* If no drive, skip */ 2510 if ((drvp->drive_flags & DRIVE) == 0) 2511 continue; 2512 if (drvp->drive_flags & DRIVE_UDMA) { 2513 /* use Ultra/DMA */ 2514 drvp->drive_flags &= ~DRIVE_DMA; 2515 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2516 } else if (drvp->drive_flags & DRIVE_DMA) { 2517 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2518 } 2519 } 2520 2521 /* 2522 * Nothing to do to setup modes; it is meaningless in S-ATA 2523 * (but many S-ATA drives still want to get the SET_FEATURE 2524 * command). 2525 */ 2526 if (idedma_ctl != 0) { 2527 /* Add software bits in status register */ 2528 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 2529 } 2530 pciide_print_modes(cp); 2531 } 2532 2533 void 2534 piix_timing_debug(struct pciide_softc *sc) 2535 { 2536 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 2537 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 2538 DEBUG_PROBE); 2539 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE && 2540 sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_ISA) { 2541 WDCDEBUG_PRINT((", sidetim=0x%x", 2542 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 2543 DEBUG_PROBE); 2544 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2545 WDCDEBUG_PRINT((", udmareg 0x%x", 2546 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 2547 DEBUG_PROBE); 2548 } 2549 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2550 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2551 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2552 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2553 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2554 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2555 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2556 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2557 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2558 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2559 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2560 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2561 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2562 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2563 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2564 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2565 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 2566 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 2567 DEBUG_PROBE); 2568 } 2569 } 2570 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 2571 } 2572 2573 void 2574 piix_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2575 { 
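	/*
	 * Common attach path for the PIIX/ICH parallel-ATA controllers:
	 * map the bus-master DMA registers, derive the UDMA capability
	 * from the product ID, pick the PIIX or PIIX3/4 timing routine,
	 * then map and probe each channel, ignoring any whose IDE decode
	 * is turned off in IDETIM.
	 */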
2576 struct pciide_channel *cp; 2577 int channel; 2578 u_int32_t idetim; 2579 bus_size_t cmdsize, ctlsize; 2580 2581 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2582 2583 printf(": DMA"); 2584 pciide_mapreg_dma(sc, pa); 2585 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2586 WDC_CAPABILITY_MODE; 2587 if (sc->sc_dma_ok) { 2588 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2589 sc->sc_wdcdev.irqack = pciide_irqack; 2590 switch (sc->sc_pp->ide_product) { 2591 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2592 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2593 case PCI_PRODUCT_INTEL_82371AB_IDE: 2594 case PCI_PRODUCT_INTEL_82372FB_IDE: 2595 case PCI_PRODUCT_INTEL_82440MX_IDE: 2596 case PCI_PRODUCT_INTEL_82451NX: 2597 case PCI_PRODUCT_INTEL_82801AA_IDE: 2598 case PCI_PRODUCT_INTEL_82801AB_IDE: 2599 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2600 case PCI_PRODUCT_INTEL_82801BA_IDE: 2601 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2602 case PCI_PRODUCT_INTEL_82801CA_IDE: 2603 case PCI_PRODUCT_INTEL_82801DB_IDE: 2604 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2605 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2606 case PCI_PRODUCT_INTEL_82801EB_IDE: 2607 case PCI_PRODUCT_INTEL_82801FB_IDE: 2608 case PCI_PRODUCT_INTEL_82801GB_IDE: 2609 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2610 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2611 break; 2612 } 2613 } 2614 sc->sc_wdcdev.PIO_cap = 4; 2615 sc->sc_wdcdev.DMA_cap = 2; 2616 switch (sc->sc_pp->ide_product) { 2617 case PCI_PRODUCT_INTEL_82801AA_IDE: 2618 case PCI_PRODUCT_INTEL_82372FB_IDE: 2619 sc->sc_wdcdev.UDMA_cap = 4; 2620 break; 2621 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2622 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2623 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2624 case PCI_PRODUCT_INTEL_82801BA_IDE: 2625 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2626 case PCI_PRODUCT_INTEL_82801CA_IDE: 2627 case PCI_PRODUCT_INTEL_82801DB_IDE: 2628 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2629 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2630 case PCI_PRODUCT_INTEL_82801EB_IDE: 2631 case PCI_PRODUCT_INTEL_82801FB_IDE: 2632 case PCI_PRODUCT_INTEL_82801GB_IDE: 2633 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2634 sc->sc_wdcdev.UDMA_cap = 5; 2635 break; 2636 default: 2637 sc->sc_wdcdev.UDMA_cap = 2; 2638 break; 2639 } 2640 2641 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE || 2642 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_ISA) { 2643 sc->sc_wdcdev.set_modes = piix_setup_channel; 2644 } else { 2645 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 2646 } 2647 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2648 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2649 2650 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2651 2652 piix_timing_debug(sc); 2653 2654 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2655 cp = &sc->pciide_channels[channel]; 2656 2657 if (pciide_chansetup(sc, channel, interface) == 0) 2658 continue; 2659 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2660 if ((PIIX_IDETIM_READ(idetim, channel) & 2661 PIIX_IDETIM_IDE) == 0) { 2662 printf("%s: %s ignored (disabled)\n", 2663 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2664 continue; 2665 } 2666 pciide_map_compat_intr(pa, cp, channel, interface); 2667 if (cp->hw_ok == 0) 2668 continue; 2669 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2670 pciide_pci_intr); 2671 if (cp->hw_ok == 0) 2672 goto next; 2673 if (pciide_chan_candisable(cp)) { 2674 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 2675 channel); 2676 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 2677 idetim); 2678 
} 2679 if (cp->hw_ok == 0) 2680 goto next; 2681 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2682 next: 2683 if (cp->hw_ok == 0) 2684 pciide_unmap_compat_intr(pa, cp, channel, interface); 2685 } 2686 2687 piix_timing_debug(sc); 2688 } 2689 2690 void 2691 piixsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2692 { 2693 struct pciide_channel *cp; 2694 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2695 int channel; 2696 bus_size_t cmdsize, ctlsize; 2697 u_int8_t reg, ich = 0; 2698 2699 printf(": DMA"); 2700 pciide_mapreg_dma(sc, pa); 2701 2702 if (sc->sc_dma_ok) { 2703 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2704 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2705 sc->sc_wdcdev.irqack = pciide_irqack; 2706 sc->sc_wdcdev.DMA_cap = 2; 2707 sc->sc_wdcdev.UDMA_cap = 6; 2708 } 2709 sc->sc_wdcdev.PIO_cap = 4; 2710 2711 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2712 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2713 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2714 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2715 sc->sc_wdcdev.set_modes = sata_setup_channel; 2716 2717 switch(sc->sc_pp->ide_product) { 2718 case PCI_PRODUCT_INTEL_6300ESB_SATA: 2719 case PCI_PRODUCT_INTEL_6300ESB_SATA2: 2720 case PCI_PRODUCT_INTEL_82801EB_SATA: 2721 case PCI_PRODUCT_INTEL_82801ER_SATA: 2722 ich = 5; 2723 break; 2724 case PCI_PRODUCT_INTEL_82801FB_SATA: 2725 case PCI_PRODUCT_INTEL_82801FR_SATA: 2726 case PCI_PRODUCT_INTEL_82801FBM_SATA: 2727 ich = 6; 2728 break; 2729 default: 2730 ich = 7; 2731 break; 2732 } 2733 2734 /* 2735 * Put the SATA portion of controllers that don't operate in combined 2736 * mode into native PCI modes so the maximum number of devices can be 2737 * used. Intel calls this "enhanced mode" 2738 */ 2739 if (ich == 5) { 2740 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP); 2741 if ((reg & ICH5_SATA_MAP_COMBINED) == 0) { 2742 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2743 ICH5_SATA_PI); 2744 reg |= ICH5_SATA_PI_PRI_NATIVE | 2745 ICH5_SATA_PI_SEC_NATIVE; 2746 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2747 ICH5_SATA_PI, reg); 2748 interface |= PCIIDE_INTERFACE_PCI(0) | 2749 PCIIDE_INTERFACE_PCI(1); 2750 } 2751 } else { 2752 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP) & 2753 ICH6_SATA_MAP_CMB_MASK; 2754 if (reg != ICH6_SATA_MAP_CMB_PRI && 2755 reg != ICH6_SATA_MAP_CMB_SEC) { 2756 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2757 ICH5_SATA_PI); 2758 reg |= ICH5_SATA_PI_PRI_NATIVE | 2759 ICH5_SATA_PI_SEC_NATIVE; 2760 2761 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2762 ICH5_SATA_PI, reg); 2763 interface |= PCIIDE_INTERFACE_PCI(0) | 2764 PCIIDE_INTERFACE_PCI(1); 2765 2766 /* 2767 * Ask for SATA IDE Mode, we don't need to do this 2768 * for the combined mode case as combined mode is 2769 * only allowed in IDE Mode 2770 */ 2771 if (ich >= 7) { 2772 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 2773 ICH5_SATA_MAP) & ~ICH7_SATA_MAP_SMS_MASK; 2774 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2775 ICH5_SATA_MAP, reg); 2776 } 2777 } 2778 } 2779 2780 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2781 2782 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2783 cp = &sc->pciide_channels[channel]; 2784 if (pciide_chansetup(sc, channel, interface) == 0) 2785 continue; 2786 2787 pciide_map_compat_intr(pa, cp, channel, interface); 2788 if (cp->hw_ok == 0) 2789 continue; 2790 2791 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2792 pciide_pci_intr); 2793 if (cp->hw_ok != 0) 2794 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 
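		/*
		 * If the channel could not be brought up, release any
		 * compat interrupt that was established for it above.
		 */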
2795 2796 if (cp->hw_ok == 0) 2797 pciide_unmap_compat_intr(pa, cp, channel, interface); 2798 } 2799 } 2800 2801 void 2802 piix_setup_channel(struct channel_softc *chp) 2803 { 2804 u_int8_t mode[2], drive; 2805 u_int32_t oidetim, idetim, idedma_ctl; 2806 struct pciide_channel *cp = (struct pciide_channel *)chp; 2807 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2808 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 2809 2810 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2811 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 2812 idedma_ctl = 0; 2813 2814 /* set up new idetim: Enable IDE registers decode */ 2815 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 2816 chp->channel); 2817 2818 /* setup DMA */ 2819 pciide_channel_dma_setup(cp); 2820 2821 /* 2822 * Here we have to juggle the drive modes: the PIIX can't use 2823 * different timings for master and slave drives. 2824 * We need to find the best combination. 2825 */ 2826 2827 /* If both drives support DMA, take the lower mode */ 2828 if ((drvp[0].drive_flags & DRIVE_DMA) && 2829 (drvp[1].drive_flags & DRIVE_DMA)) { 2830 mode[0] = mode[1] = 2831 min(drvp[0].DMA_mode, drvp[1].DMA_mode); 2832 drvp[0].DMA_mode = mode[0]; 2833 drvp[1].DMA_mode = mode[1]; 2834 goto ok; 2835 } 2836 /* 2837 * If only one drive supports DMA, use its mode, and 2838 * put the other one in PIO mode 0 if its mode is not compatible 2839 */ 2840 if (drvp[0].drive_flags & DRIVE_DMA) { 2841 mode[0] = drvp[0].DMA_mode; 2842 mode[1] = drvp[1].PIO_mode; 2843 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] || 2844 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]]) 2845 mode[1] = drvp[1].PIO_mode = 0; 2846 goto ok; 2847 } 2848 if (drvp[1].drive_flags & DRIVE_DMA) { 2849 mode[1] = drvp[1].DMA_mode; 2850 mode[0] = drvp[0].PIO_mode; 2851 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] || 2852 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]]) 2853 mode[0] = drvp[0].PIO_mode = 0; 2854 goto ok; 2855 } 2856 /* 2857 * If neither drive is using DMA, take the lower mode, unless 2858 * one of them is PIO mode < 2 2859 */ 2860 if (drvp[0].PIO_mode < 2) { 2861 mode[0] = drvp[0].PIO_mode = 0; 2862 mode[1] = drvp[1].PIO_mode; 2863 } else if (drvp[1].PIO_mode < 2) { 2864 mode[1] = drvp[1].PIO_mode = 0; 2865 mode[0] = drvp[0].PIO_mode; 2866 } else { 2867 mode[0] = mode[1] = 2868 min(drvp[1].PIO_mode, drvp[0].PIO_mode); 2869 drvp[0].PIO_mode = mode[0]; 2870 drvp[1].PIO_mode = mode[1]; 2871 } 2872 ok: /* The modes are set up */ 2873 for (drive = 0; drive < 2; drive++) { 2874 if (drvp[drive].drive_flags & DRIVE_DMA) { 2875 idetim |= piix_setup_idetim_timings( 2876 mode[drive], 1, chp->channel); 2877 goto end; 2878 } 2879 } 2880 /* If we get here, neither drive is using DMA */ 2881 if (mode[0] >= 2) 2882 idetim |= piix_setup_idetim_timings( 2883 mode[0], 0, chp->channel); 2884 else 2885 idetim |= piix_setup_idetim_timings( 2886 mode[1], 0, chp->channel); 2887 end: /* 2888 * timing mode is now set up in the controller.
Enable 2889 * it per-drive 2890 */ 2891 for (drive = 0; drive < 2; drive++) { 2892 /* If no drive, skip */ 2893 if ((drvp[drive].drive_flags & DRIVE) == 0) 2894 continue; 2895 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 2896 if (drvp[drive].drive_flags & DRIVE_DMA) 2897 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2898 } 2899 if (idedma_ctl != 0) { 2900 /* Add software bits in status register */ 2901 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2902 IDEDMA_CTL(chp->channel), 2903 idedma_ctl); 2904 } 2905 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2906 pciide_print_modes(cp); 2907 } 2908 2909 void 2910 piix3_4_setup_channel(struct channel_softc *chp) 2911 { 2912 struct ata_drive_datas *drvp; 2913 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 2914 struct pciide_channel *cp = (struct pciide_channel *)chp; 2915 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2916 int drive; 2917 int channel = chp->channel; 2918 2919 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2920 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 2921 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 2922 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 2923 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 2924 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 2925 PIIX_SIDETIM_RTC_MASK(channel)); 2926 2927 idedma_ctl = 0; 2928 /* If channel disabled, no need to go further */ 2929 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 2930 return; 2931 /* set up new idetim: Enable IDE registers decode */ 2932 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 2933 2934 /* setup DMA if needed */ 2935 pciide_channel_dma_setup(cp); 2936 2937 for (drive = 0; drive < 2; drive++) { 2938 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 2939 PIIX_UDMATIM_SET(0x3, channel, drive)); 2940 drvp = &chp->ch_drive[drive]; 2941 /* If no drive, skip */ 2942 if ((drvp->drive_flags & DRIVE) == 0) 2943 continue; 2944 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2945 (drvp->drive_flags & DRIVE_UDMA) == 0)) 2946 goto pio; 2947 2948 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2949 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2950 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2951 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2952 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2953 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2954 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2955 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2956 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2957 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2958 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2959 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2960 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2961 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2962 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2963 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2964 ideconf |= PIIX_CONFIG_PINGPONG; 2965 } 2966 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2967 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2968 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2969 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 2970 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 2971 
sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2972 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2973 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2974 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2975 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2976 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2977 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2978 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) { 2979 /* setup Ultra/100 */ 2980 if (drvp->UDMA_mode > 2 && 2981 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2982 drvp->UDMA_mode = 2; 2983 if (drvp->UDMA_mode > 4) { 2984 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 2985 } else { 2986 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 2987 if (drvp->UDMA_mode > 2) { 2988 ideconf |= PIIX_CONFIG_UDMA66(channel, 2989 drive); 2990 } else { 2991 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 2992 drive); 2993 } 2994 } 2995 } 2996 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2997 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2998 /* setup Ultra/66 */ 2999 if (drvp->UDMA_mode > 2 && 3000 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 3001 drvp->UDMA_mode = 2; 3002 if (drvp->UDMA_mode > 2) 3003 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 3004 else 3005 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 3006 } 3007 3008 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3009 (drvp->drive_flags & DRIVE_UDMA)) { 3010 /* use Ultra/DMA */ 3011 drvp->drive_flags &= ~DRIVE_DMA; 3012 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 3013 udmareg |= PIIX_UDMATIM_SET( 3014 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 3015 } else { 3016 /* use Multiword DMA */ 3017 drvp->drive_flags &= ~DRIVE_UDMA; 3018 if (drive == 0) { 3019 idetim |= piix_setup_idetim_timings( 3020 drvp->DMA_mode, 1, channel); 3021 } else { 3022 sidetim |= piix_setup_sidetim_timings( 3023 drvp->DMA_mode, 1, channel); 3024 idetim = PIIX_IDETIM_SET(idetim, 3025 PIIX_IDETIM_SITRE, channel); 3026 } 3027 } 3028 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3029 3030 pio: /* use PIO mode */ 3031 idetim |= piix_setup_idetim_drvs(drvp); 3032 if (drive == 0) { 3033 idetim |= piix_setup_idetim_timings( 3034 drvp->PIO_mode, 0, channel); 3035 } else { 3036 sidetim |= piix_setup_sidetim_timings( 3037 drvp->PIO_mode, 0, channel); 3038 idetim = PIIX_IDETIM_SET(idetim, 3039 PIIX_IDETIM_SITRE, channel); 3040 } 3041 } 3042 if (idedma_ctl != 0) { 3043 /* Add software bits in status register */ 3044 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3045 IDEDMA_CTL(channel), 3046 idedma_ctl); 3047 } 3048 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 3049 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 3050 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 3051 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 3052 pciide_print_modes(cp); 3053 } 3054 3055 3056 /* setup ISP and RTC fields, based on mode */ 3057 u_int32_t 3058 piix_setup_idetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 3059 { 3060 3061 if (dma) 3062 return (PIIX_IDETIM_SET(0, 3063 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 3064 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 3065 channel)); 3066 else 3067 return (PIIX_IDETIM_SET(0, 3068 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 3069 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 3070 channel)); 3071 } 3072 3073 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 3074 u_int32_t 3075 piix_setup_idetim_drvs(struct 
ata_drive_datas *drvp) 3076 { 3077 u_int32_t ret = 0; 3078 struct channel_softc *chp = drvp->chnl_softc; 3079 u_int8_t channel = chp->channel; 3080 u_int8_t drive = drvp->drive; 3081 3082 /* 3083 * If the drive is using UDMA, the timing setup is independent, 3084 * so just check DMA and PIO here. 3085 */ 3086 if (drvp->drive_flags & DRIVE_DMA) { 3087 /* if mode = DMA mode 0, use compatible timings */ 3088 if ((drvp->drive_flags & DRIVE_DMA) && 3089 drvp->DMA_mode == 0) { 3090 drvp->PIO_mode = 0; 3091 return (ret); 3092 } 3093 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 3094 /* 3095 * If PIO and DMA timings are the same, use fast timings for PIO 3096 * too, else use compat timings. 3097 */ 3098 if ((piix_isp_pio[drvp->PIO_mode] != 3099 piix_isp_dma[drvp->DMA_mode]) || 3100 (piix_rtc_pio[drvp->PIO_mode] != 3101 piix_rtc_dma[drvp->DMA_mode])) 3102 drvp->PIO_mode = 0; 3103 /* if PIO mode <= 2, use compat timings for PIO */ 3104 if (drvp->PIO_mode <= 2) { 3105 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 3106 channel); 3107 return (ret); 3108 } 3109 } 3110 3111 /* 3112 * Now set up PIO modes. If mode < 2, use compat timings. 3113 * Else enable fast timings. Enable IORDY and prefetch/post 3114 * if PIO mode >= 3. 3115 */ 3116 3117 if (drvp->PIO_mode < 2) 3118 return (ret); 3119 3120 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 3121 if (drvp->PIO_mode >= 3) { 3122 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 3123 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 3124 } 3125 return (ret); 3126 } 3127 3128 /* setup values in SIDETIM registers, based on mode */ 3129 u_int32_t 3130 piix_setup_sidetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 3131 { 3132 if (dma) 3133 return (PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 3134 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel)); 3135 else 3136 return (PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 3137 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel)); 3138 } 3139 3140 void 3141 amd756_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3142 { 3143 struct pciide_channel *cp; 3144 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3145 int channel; 3146 pcireg_t chanenable; 3147 bus_size_t cmdsize, ctlsize; 3148 3149 printf(": DMA"); 3150 pciide_mapreg_dma(sc, pa); 3151 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3152 WDC_CAPABILITY_MODE; 3153 if (sc->sc_dma_ok) { 3154 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3155 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3156 sc->sc_wdcdev.irqack = pciide_irqack; 3157 } 3158 sc->sc_wdcdev.PIO_cap = 4; 3159 sc->sc_wdcdev.DMA_cap = 2; 3160 switch (sc->sc_pp->ide_product) { 3161 case PCI_PRODUCT_AMD_8111_IDE: 3162 sc->sc_wdcdev.UDMA_cap = 6; 3163 break; 3164 case PCI_PRODUCT_AMD_766_IDE: 3165 case PCI_PRODUCT_AMD_PBC768_IDE: 3166 sc->sc_wdcdev.UDMA_cap = 5; 3167 break; 3168 default: 3169 sc->sc_wdcdev.UDMA_cap = 4; 3170 break; 3171 } 3172 sc->sc_wdcdev.set_modes = amd756_setup_channel; 3173 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3174 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3175 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 3176 3177 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3178 3179 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3180 cp = &sc->pciide_channels[channel]; 3181 if (pciide_chansetup(sc, channel, interface) == 0) 3182 continue; 3183 3184 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 3185
printf("%s: %s ignored (disabled)\n", 3186 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3187 continue; 3188 } 3189 pciide_map_compat_intr(pa, cp, channel, interface); 3190 if (cp->hw_ok == 0) 3191 continue; 3192 3193 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3194 pciide_pci_intr); 3195 3196 if (pciide_chan_candisable(cp)) { 3197 chanenable &= ~AMD756_CHAN_EN(channel); 3198 } 3199 if (cp->hw_ok == 0) { 3200 pciide_unmap_compat_intr(pa, cp, channel, interface); 3201 continue; 3202 } 3203 3204 amd756_setup_channel(&cp->wdc_channel); 3205 } 3206 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 3207 chanenable); 3208 return; 3209 } 3210 3211 void 3212 amd756_setup_channel(struct channel_softc *chp) 3213 { 3214 u_int32_t udmatim_reg, datatim_reg; 3215 u_int8_t idedma_ctl; 3216 int mode, drive; 3217 struct ata_drive_datas *drvp; 3218 struct pciide_channel *cp = (struct pciide_channel *)chp; 3219 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3220 pcireg_t chanenable; 3221 #ifndef PCIIDE_AMD756_ENABLEDMA 3222 int product = sc->sc_pp->ide_product; 3223 int rev = sc->sc_rev; 3224 #endif 3225 3226 idedma_ctl = 0; 3227 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 3228 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 3229 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 3230 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 3231 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 3232 AMD756_CHANSTATUS_EN); 3233 3234 /* setup DMA if needed */ 3235 pciide_channel_dma_setup(cp); 3236 3237 for (drive = 0; drive < 2; drive++) { 3238 drvp = &chp->ch_drive[drive]; 3239 /* If no drive, skip */ 3240 if ((drvp->drive_flags & DRIVE) == 0) 3241 continue; 3242 /* add timing values, setup DMA if needed */ 3243 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3244 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3245 mode = drvp->PIO_mode; 3246 goto pio; 3247 } 3248 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3249 (drvp->drive_flags & DRIVE_UDMA)) { 3250 /* use Ultra/DMA */ 3251 drvp->drive_flags &= ~DRIVE_DMA; 3252 3253 /* Check cable */ 3254 if ((chanenable & AMD756_CABLE(chp->channel, 3255 drive)) == 0 && drvp->UDMA_mode > 2) { 3256 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 3257 "cable not detected\n", drvp->drive_name, 3258 sc->sc_wdcdev.sc_dev.dv_xname, 3259 chp->channel, drive), DEBUG_PROBE); 3260 drvp->UDMA_mode = 2; 3261 } 3262 3263 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 3264 AMD756_UDMA_EN_MTH(chp->channel, drive) | 3265 AMD756_UDMA_TIME(chp->channel, drive, 3266 amd756_udma_tim[drvp->UDMA_mode]); 3267 /* can use PIO timings, MW DMA unused */ 3268 mode = drvp->PIO_mode; 3269 } else { 3270 /* use Multiword DMA, but only if revision is OK */ 3271 drvp->drive_flags &= ~DRIVE_UDMA; 3272 #ifndef PCIIDE_AMD756_ENABLEDMA 3273 /* 3274 * The workaround doesn't seem to be necessary 3275 * with all drives, so it can be disabled by 3276 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 3277 * triggered. 
3278 */ 3279 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 3280 printf("%s:%d:%d: multi-word DMA disabled due " 3281 "to chip revision\n", 3282 sc->sc_wdcdev.sc_dev.dv_xname, 3283 chp->channel, drive); 3284 mode = drvp->PIO_mode; 3285 drvp->drive_flags &= ~DRIVE_DMA; 3286 goto pio; 3287 } 3288 #endif 3289 /* mode = min(pio, dma+2) */ 3290 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3291 mode = drvp->PIO_mode; 3292 else 3293 mode = drvp->DMA_mode + 2; 3294 } 3295 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3296 3297 pio: /* setup PIO mode */ 3298 if (mode <= 2) { 3299 drvp->DMA_mode = 0; 3300 drvp->PIO_mode = 0; 3301 mode = 0; 3302 } else { 3303 drvp->PIO_mode = mode; 3304 drvp->DMA_mode = mode - 2; 3305 } 3306 datatim_reg |= 3307 AMD756_DATATIM_PULSE(chp->channel, drive, 3308 amd756_pio_set[mode]) | 3309 AMD756_DATATIM_RECOV(chp->channel, drive, 3310 amd756_pio_rec[mode]); 3311 } 3312 if (idedma_ctl != 0) { 3313 /* Add software bits in status register */ 3314 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3315 IDEDMA_CTL(chp->channel), 3316 idedma_ctl); 3317 } 3318 pciide_print_modes(cp); 3319 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 3320 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 3321 } 3322 3323 void 3324 apollo_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3325 { 3326 struct pciide_channel *cp; 3327 pcireg_t interface; 3328 int channel; 3329 u_int32_t ideconf; 3330 bus_size_t cmdsize, ctlsize; 3331 pcitag_t tag; 3332 pcireg_t id, class; 3333 3334 /* 3335 * Fake interface since VT6410 is claimed to be a ``RAID'' device. 3336 */ 3337 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3338 interface = PCI_INTERFACE(pa->pa_class); 3339 } else { 3340 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3341 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3342 } 3343 3344 if ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT6410) || 3345 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT6415) || 3346 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_CX700_IDE) || 3347 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VX700_IDE) || 3348 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VX855_IDE) || 3349 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VX900_IDE)) { 3350 printf(": ATA133"); 3351 sc->sc_wdcdev.UDMA_cap = 6; 3352 } else { 3353 /* 3354 * Determine the DMA capabilities by looking at the 3355 * ISA bridge. 3356 */ 3357 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 3358 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3359 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3360 3361 /* 3362 * XXX On the VT8237, the ISA bridge is on a different 3363 * device. 
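 * If function 0 of our own device is not a bridge, look for the
 * ISA bridge at device 17 instead.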
3364 */ 3365 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE && 3366 pa->pa_device == 15) { 3367 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 17, 0); 3368 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3369 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3370 } 3371 3372 switch (PCI_PRODUCT(id)) { 3373 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 3374 if (PCI_REVISION(class) >= 0x02) { 3375 printf(": ATA33"); 3376 sc->sc_wdcdev.UDMA_cap = 2; 3377 } else { 3378 printf(": DMA"); 3379 sc->sc_wdcdev.UDMA_cap = 0; 3380 } 3381 break; 3382 case PCI_PRODUCT_VIATECH_VT82C596A: 3383 if (PCI_REVISION(class) >= 0x12) { 3384 printf(": ATA66"); 3385 sc->sc_wdcdev.UDMA_cap = 4; 3386 } else { 3387 printf(": ATA33"); 3388 sc->sc_wdcdev.UDMA_cap = 2; 3389 } 3390 break; 3391 3392 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 3393 if (PCI_REVISION(class) >= 0x40) { 3394 printf(": ATA100"); 3395 sc->sc_wdcdev.UDMA_cap = 5; 3396 } else { 3397 printf(": ATA66"); 3398 sc->sc_wdcdev.UDMA_cap = 4; 3399 } 3400 break; 3401 case PCI_PRODUCT_VIATECH_VT8231_ISA: 3402 case PCI_PRODUCT_VIATECH_VT8233_ISA: 3403 printf(": ATA100"); 3404 sc->sc_wdcdev.UDMA_cap = 5; 3405 break; 3406 case PCI_PRODUCT_VIATECH_VT8233A_ISA: 3407 case PCI_PRODUCT_VIATECH_VT8235_ISA: 3408 case PCI_PRODUCT_VIATECH_VT8237_ISA: 3409 printf(": ATA133"); 3410 sc->sc_wdcdev.UDMA_cap = 6; 3411 break; 3412 default: 3413 printf(": DMA"); 3414 sc->sc_wdcdev.UDMA_cap = 0; 3415 break; 3416 } 3417 } 3418 3419 pciide_mapreg_dma(sc, pa); 3420 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3421 WDC_CAPABILITY_MODE; 3422 if (sc->sc_dma_ok) { 3423 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3424 sc->sc_wdcdev.irqack = pciide_irqack; 3425 if (sc->sc_wdcdev.UDMA_cap > 0) 3426 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3427 } 3428 sc->sc_wdcdev.PIO_cap = 4; 3429 sc->sc_wdcdev.DMA_cap = 2; 3430 sc->sc_wdcdev.set_modes = apollo_setup_channel; 3431 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3432 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3433 3434 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3435 3436 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 3437 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3438 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 3439 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 3440 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3441 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 3442 DEBUG_PROBE); 3443 3444 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3445 cp = &sc->pciide_channels[channel]; 3446 if (pciide_chansetup(sc, channel, interface) == 0) 3447 continue; 3448 3449 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 3450 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 3451 printf("%s: %s ignored (disabled)\n", 3452 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3453 continue; 3454 } 3455 pciide_map_compat_intr(pa, cp, channel, interface); 3456 if (cp->hw_ok == 0) 3457 continue; 3458 3459 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3460 pciide_pci_intr); 3461 if (cp->hw_ok == 0) { 3462 goto next; 3463 } 3464 if (pciide_chan_candisable(cp)) { 3465 ideconf &= ~APO_IDECONF_EN(channel); 3466 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 3467 ideconf); 3468 } 3469 3470 if (cp->hw_ok == 0) 3471 goto next; 3472 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 3473 next: 3474 if (cp->hw_ok == 0) 3475 pciide_unmap_compat_intr(pa, cp, channel, interface); 3476 } 3477 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, 
APO_UDMA=0x%x\n", 3478 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3479 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 3480 } 3481 3482 void 3483 apollo_setup_channel(struct channel_softc *chp) 3484 { 3485 u_int32_t udmatim_reg, datatim_reg; 3486 u_int8_t idedma_ctl; 3487 int mode, drive; 3488 struct ata_drive_datas *drvp; 3489 struct pciide_channel *cp = (struct pciide_channel *)chp; 3490 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3491 3492 idedma_ctl = 0; 3493 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 3494 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 3495 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 3496 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 3497 3498 /* setup DMA if needed */ 3499 pciide_channel_dma_setup(cp); 3500 3501 /* 3502 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 3503 * downgrade to Ultra/33 if needed 3504 */ 3505 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3506 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 3507 /* both drives UDMA */ 3508 if (chp->ch_drive[0].UDMA_mode > 2 && 3509 chp->ch_drive[1].UDMA_mode <= 2) { 3510 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 3511 chp->ch_drive[0].UDMA_mode = 2; 3512 } else if (chp->ch_drive[1].UDMA_mode > 2 && 3513 chp->ch_drive[0].UDMA_mode <= 2) { 3514 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 3515 chp->ch_drive[1].UDMA_mode = 2; 3516 } 3517 } 3518 3519 for (drive = 0; drive < 2; drive++) { 3520 drvp = &chp->ch_drive[drive]; 3521 /* If no drive, skip */ 3522 if ((drvp->drive_flags & DRIVE) == 0) 3523 continue; 3524 /* add timing values, setup DMA if needed */ 3525 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3526 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3527 mode = drvp->PIO_mode; 3528 goto pio; 3529 } 3530 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3531 (drvp->drive_flags & DRIVE_UDMA)) { 3532 /* use Ultra/DMA */ 3533 drvp->drive_flags &= ~DRIVE_DMA; 3534 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 3535 APO_UDMA_EN_MTH(chp->channel, drive); 3536 if (sc->sc_wdcdev.UDMA_cap == 6) { 3537 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3538 drive, apollo_udma133_tim[drvp->UDMA_mode]); 3539 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 3540 /* 686b */ 3541 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3542 drive, apollo_udma100_tim[drvp->UDMA_mode]); 3543 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 3544 /* 596b or 686a */ 3545 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 3546 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3547 drive, apollo_udma66_tim[drvp->UDMA_mode]); 3548 } else { 3549 /* 596a or 586b */ 3550 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3551 drive, apollo_udma33_tim[drvp->UDMA_mode]); 3552 } 3553 /* can use PIO timings, MW DMA unused */ 3554 mode = drvp->PIO_mode; 3555 } else { 3556 /* use Multiword DMA */ 3557 drvp->drive_flags &= ~DRIVE_UDMA; 3558 /* mode = min(pio, dma+2) */ 3559 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3560 mode = drvp->PIO_mode; 3561 else 3562 mode = drvp->DMA_mode + 2; 3563 } 3564 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3565 3566 pio: /* setup PIO mode */ 3567 if (mode <= 2) { 3568 drvp->DMA_mode = 0; 3569 drvp->PIO_mode = 0; 3570 mode = 0; 3571 } else { 3572 drvp->PIO_mode = mode; 3573 drvp->DMA_mode = mode - 2; 3574 } 3575 datatim_reg |= 3576 APO_DATATIM_PULSE(chp->channel, drive, 3577 apollo_pio_set[mode]) | 3578 APO_DATATIM_RECOV(chp->channel, drive, 3579 apollo_pio_rec[mode]); 3580 } 3581 if (idedma_ctl != 0) { 3582 /* Add software bits in status register */ 3583 
bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3584 IDEDMA_CTL(chp->channel), 3585 idedma_ctl); 3586 } 3587 pciide_print_modes(cp); 3588 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 3589 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 3590 } 3591 3592 void 3593 cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3594 int channel) 3595 { 3596 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3597 bus_size_t cmdsize, ctlsize; 3598 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 3599 pcireg_t interface; 3600 int one_channel; 3601 3602 /* 3603 * The 0648/0649 can be told to identify as a RAID controller. 3604 * In this case, we have to fake interface 3605 */ 3606 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3607 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3608 PCIIDE_INTERFACE_SETTABLE(1); 3609 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3610 CMD_CONF_DSA1) 3611 interface |= PCIIDE_INTERFACE_PCI(0) | 3612 PCIIDE_INTERFACE_PCI(1); 3613 } else { 3614 interface = PCI_INTERFACE(pa->pa_class); 3615 } 3616 3617 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3618 cp->name = PCIIDE_CHANNEL_NAME(channel); 3619 cp->wdc_channel.channel = channel; 3620 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3621 3622 /* 3623 * Older CMD64X doesn't have independant channels 3624 */ 3625 switch (sc->sc_pp->ide_product) { 3626 case PCI_PRODUCT_CMDTECH_649: 3627 one_channel = 0; 3628 break; 3629 default: 3630 one_channel = 1; 3631 break; 3632 } 3633 3634 if (channel > 0 && one_channel) { 3635 cp->wdc_channel.ch_queue = 3636 sc->pciide_channels[0].wdc_channel.ch_queue; 3637 } else { 3638 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 3639 } 3640 if (cp->wdc_channel.ch_queue == NULL) { 3641 printf( 3642 "%s: %s cannot allocate channel queue", 3643 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3644 return; 3645 } 3646 3647 /* 3648 * with a CMD PCI64x, if we get here, the first channel is enabled: 3649 * there's no way to disable the first channel without disabling 3650 * the whole device 3651 */ 3652 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 3653 printf("%s: %s ignored (disabled)\n", 3654 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3655 return; 3656 } 3657 cp->hw_ok = 1; 3658 pciide_map_compat_intr(pa, cp, channel, interface); 3659 if (cp->hw_ok == 0) 3660 return; 3661 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 3662 if (cp->hw_ok == 0) { 3663 pciide_unmap_compat_intr(pa, cp, channel, interface); 3664 return; 3665 } 3666 if (pciide_chan_candisable(cp)) { 3667 if (channel == 1) { 3668 ctrl &= ~CMD_CTRL_2PORT; 3669 pciide_pci_write(pa->pa_pc, pa->pa_tag, 3670 CMD_CTRL, ctrl); 3671 pciide_unmap_compat_intr(pa, cp, channel, interface); 3672 } 3673 } 3674 } 3675 3676 int 3677 cmd_pci_intr(void *arg) 3678 { 3679 struct pciide_softc *sc = arg; 3680 struct pciide_channel *cp; 3681 struct channel_softc *wdc_cp; 3682 int i, rv, crv; 3683 u_int32_t priirq, secirq; 3684 3685 rv = 0; 3686 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3687 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3688 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3689 cp = &sc->pciide_channels[i]; 3690 wdc_cp = &cp->wdc_channel; 3691 /* If a compat channel skip. 
*/ 3692 if (cp->compat) 3693 continue; 3694 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 3695 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 3696 crv = wdcintr(wdc_cp); 3697 if (crv == 0) { 3698 #if 0 3699 printf("%s:%d: bogus intr\n", 3700 sc->sc_wdcdev.sc_dev.dv_xname, i); 3701 #endif 3702 } else 3703 rv = 1; 3704 } 3705 } 3706 return (rv); 3707 } 3708 3709 void 3710 cmd_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3711 { 3712 int channel; 3713 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3714 3715 printf(": no DMA"); 3716 sc->sc_dma_ok = 0; 3717 3718 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3719 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3720 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3721 3722 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3723 3724 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3725 cmd_channel_map(pa, sc, channel); 3726 } 3727 } 3728 3729 void 3730 cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3731 { 3732 struct pciide_channel *cp; 3733 int channel; 3734 int rev = sc->sc_rev; 3735 pcireg_t interface; 3736 3737 /* 3738 * The 0648/0649 can be told to identify as a RAID controller. 3739 * In this case, we have to fake interface 3740 */ 3741 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3742 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3743 PCIIDE_INTERFACE_SETTABLE(1); 3744 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3745 CMD_CONF_DSA1) 3746 interface |= PCIIDE_INTERFACE_PCI(0) | 3747 PCIIDE_INTERFACE_PCI(1); 3748 } else { 3749 interface = PCI_INTERFACE(pa->pa_class); 3750 } 3751 3752 printf(": DMA"); 3753 pciide_mapreg_dma(sc, pa); 3754 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3755 WDC_CAPABILITY_MODE; 3756 if (sc->sc_dma_ok) { 3757 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3758 switch (sc->sc_pp->ide_product) { 3759 case PCI_PRODUCT_CMDTECH_649: 3760 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3761 sc->sc_wdcdev.UDMA_cap = 5; 3762 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3763 break; 3764 case PCI_PRODUCT_CMDTECH_648: 3765 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3766 sc->sc_wdcdev.UDMA_cap = 4; 3767 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3768 break; 3769 case PCI_PRODUCT_CMDTECH_646: 3770 if (rev >= CMD0646U2_REV) { 3771 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3772 sc->sc_wdcdev.UDMA_cap = 2; 3773 } else if (rev >= CMD0646U_REV) { 3774 /* 3775 * Linux's driver claims that the 646U is broken 3776 * with UDMA. 
Only enable it if we know what we're 3777 * doing 3778 */ 3779 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 3780 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3781 sc->sc_wdcdev.UDMA_cap = 2; 3782 #endif 3783 /* explicitly disable UDMA */ 3784 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3785 CMD_UDMATIM(0), 0); 3786 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3787 CMD_UDMATIM(1), 0); 3788 } 3789 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3790 break; 3791 default: 3792 sc->sc_wdcdev.irqack = pciide_irqack; 3793 } 3794 } 3795 3796 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3797 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3798 sc->sc_wdcdev.PIO_cap = 4; 3799 sc->sc_wdcdev.DMA_cap = 2; 3800 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 3801 3802 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3803 3804 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 3805 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3806 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3807 DEBUG_PROBE); 3808 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3809 cp = &sc->pciide_channels[channel]; 3810 cmd_channel_map(pa, sc, channel); 3811 if (cp->hw_ok == 0) 3812 continue; 3813 cmd0643_9_setup_channel(&cp->wdc_channel); 3814 } 3815 /* 3816 * note - this also makes sure we clear the irq disable and reset 3817 * bits 3818 */ 3819 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 3820 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 3821 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3822 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3823 DEBUG_PROBE); 3824 } 3825 3826 void 3827 cmd0643_9_setup_channel(struct channel_softc *chp) 3828 { 3829 struct ata_drive_datas *drvp; 3830 u_int8_t tim; 3831 u_int32_t idedma_ctl, udma_reg; 3832 int drive; 3833 struct pciide_channel *cp = (struct pciide_channel *)chp; 3834 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3835 3836 idedma_ctl = 0; 3837 /* setup DMA if needed */ 3838 pciide_channel_dma_setup(cp); 3839 3840 for (drive = 0; drive < 2; drive++) { 3841 drvp = &chp->ch_drive[drive]; 3842 /* If no drive, skip */ 3843 if ((drvp->drive_flags & DRIVE) == 0) 3844 continue; 3845 /* add timing values, setup DMA if needed */ 3846 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 3847 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 3848 if (drvp->drive_flags & DRIVE_UDMA) { 3849 /* UltraDMA on a 646U2, 0648 or 0649 */ 3850 drvp->drive_flags &= ~DRIVE_DMA; 3851 udma_reg = pciide_pci_read(sc->sc_pc, 3852 sc->sc_tag, CMD_UDMATIM(chp->channel)); 3853 if (drvp->UDMA_mode > 2 && 3854 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3855 CMD_BICSR) & 3856 CMD_BICSR_80(chp->channel)) == 0) { 3857 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 3858 "80-wire cable not detected\n", 3859 drvp->drive_name, 3860 sc->sc_wdcdev.sc_dev.dv_xname, 3861 chp->channel, drive), DEBUG_PROBE); 3862 drvp->UDMA_mode = 2; 3863 } 3864 if (drvp->UDMA_mode > 2) 3865 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 3866 else if (sc->sc_wdcdev.UDMA_cap > 2) 3867 udma_reg |= CMD_UDMATIM_UDMA33(drive); 3868 udma_reg |= CMD_UDMATIM_UDMA(drive); 3869 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 3870 CMD_UDMATIM_TIM_OFF(drive)); 3871 udma_reg |= 3872 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 3873 CMD_UDMATIM_TIM_OFF(drive)); 3874 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3875 CMD_UDMATIM(chp->channel), udma_reg); 3876 } else { 3877 /* 3878 * use Multiword DMA. 
3879 * Timings will be used for both PIO and DMA, 3880 * so adjust DMA mode if needed 3881 * if we have a 0646U2/8/9, turn off UDMA 3882 */ 3883 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 3884 udma_reg = pciide_pci_read(sc->sc_pc, 3885 sc->sc_tag, 3886 CMD_UDMATIM(chp->channel)); 3887 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 3888 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3889 CMD_UDMATIM(chp->channel), 3890 udma_reg); 3891 } 3892 if (drvp->PIO_mode >= 3 && 3893 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3894 drvp->DMA_mode = drvp->PIO_mode - 2; 3895 } 3896 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 3897 } 3898 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3899 } 3900 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3901 CMD_DATA_TIM(chp->channel, drive), tim); 3902 } 3903 if (idedma_ctl != 0) { 3904 /* Add software bits in status register */ 3905 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3906 IDEDMA_CTL(chp->channel), 3907 idedma_ctl); 3908 } 3909 pciide_print_modes(cp); 3910 #ifdef __sparc64__ 3911 /* 3912 * The Ultra 5 has a tendency to hang during reboot. This is due 3913 * to the PCI0646U asserting a PCI interrupt line when the chip 3914 * registers claim that it is not. Performing a reset at this 3915 * point appears to eliminate the symptoms. It is likely the 3916 * real cause is still lurking somewhere in the code. 3917 */ 3918 wdcreset(chp, SILENT); 3919 #endif /* __sparc64__ */ 3920 } 3921 3922 void 3923 cmd646_9_irqack(struct channel_softc *chp) 3924 { 3925 u_int32_t priirq, secirq; 3926 struct pciide_channel *cp = (struct pciide_channel *)chp; 3927 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3928 3929 if (chp->channel == 0) { 3930 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3931 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 3932 } else { 3933 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3934 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 3935 } 3936 pciide_irqack(chp); 3937 } 3938 3939 void 3940 cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3941 { 3942 struct pciide_channel *cp; 3943 int channel; 3944 3945 printf("\n%s: bus-master DMA support present", 3946 sc->sc_wdcdev.sc_dev.dv_xname); 3947 pciide_mapreg_dma(sc, pa); 3948 printf("\n"); 3949 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3950 WDC_CAPABILITY_MODE; 3951 if (sc->sc_dma_ok) { 3952 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3953 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3954 sc->sc_wdcdev.UDMA_cap = 6; 3955 sc->sc_wdcdev.irqack = pciide_irqack; 3956 } 3957 3958 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3959 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3960 sc->sc_wdcdev.PIO_cap = 4; 3961 sc->sc_wdcdev.DMA_cap = 2; 3962 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 3963 3964 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 3965 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 3966 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 3967 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 3968 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3969 cp = &sc->pciide_channels[channel]; 3970 cmd680_channel_map(pa, sc, channel); 3971 if (cp->hw_ok == 0) 3972 continue; 3973 cmd680_setup_channel(&cp->wdc_channel); 3974 } 3975 } 3976 3977 void 3978 cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3979 int channel) 3980 { 3981 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3982 bus_size_t cmdsize, ctlsize; 3983 int 
interface, i, reg; 3984 static const u_int8_t init_val[] = 3985 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 3986 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 3987 3988 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3989 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3990 PCIIDE_INTERFACE_SETTABLE(1); 3991 interface |= PCIIDE_INTERFACE_PCI(0) | 3992 PCIIDE_INTERFACE_PCI(1); 3993 } else { 3994 interface = PCI_INTERFACE(pa->pa_class); 3995 } 3996 3997 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3998 cp->name = PCIIDE_CHANNEL_NAME(channel); 3999 cp->wdc_channel.channel = channel; 4000 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4001 4002 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4003 if (cp->wdc_channel.ch_queue == NULL) { 4004 printf("%s %s: " 4005 "cannot allocate channel queue", 4006 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4007 return; 4008 } 4009 4010 /* XXX */ 4011 reg = 0xa2 + channel * 16; 4012 for (i = 0; i < sizeof(init_val); i++) 4013 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 4014 4015 printf("%s: %s %s to %s mode\n", 4016 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 4017 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 4018 "configured" : "wired", 4019 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 4020 "native-PCI" : "compatibility"); 4021 4022 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 4023 if (cp->hw_ok == 0) 4024 return; 4025 pciide_map_compat_intr(pa, cp, channel, interface); 4026 } 4027 4028 void 4029 cmd680_setup_channel(struct channel_softc *chp) 4030 { 4031 struct ata_drive_datas *drvp; 4032 u_int8_t mode, off, scsc; 4033 u_int16_t val; 4034 u_int32_t idedma_ctl; 4035 int drive; 4036 struct pciide_channel *cp = (struct pciide_channel *)chp; 4037 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4038 pci_chipset_tag_t pc = sc->sc_pc; 4039 pcitag_t pa = sc->sc_tag; 4040 static const u_int8_t udma2_tbl[] = 4041 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 4042 static const u_int8_t udma_tbl[] = 4043 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 4044 static const u_int16_t dma_tbl[] = 4045 { 0x2208, 0x10c2, 0x10c1 }; 4046 static const u_int16_t pio_tbl[] = 4047 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 4048 4049 idedma_ctl = 0; 4050 pciide_channel_dma_setup(cp); 4051 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 4052 4053 for (drive = 0; drive < 2; drive++) { 4054 drvp = &chp->ch_drive[drive]; 4055 /* If no drive, skip */ 4056 if ((drvp->drive_flags & DRIVE) == 0) 4057 continue; 4058 mode &= ~(0x03 << (drive * 4)); 4059 if (drvp->drive_flags & DRIVE_UDMA) { 4060 drvp->drive_flags &= ~DRIVE_DMA; 4061 off = 0xa0 + chp->channel * 16; 4062 if (drvp->UDMA_mode > 2 && 4063 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 4064 drvp->UDMA_mode = 2; 4065 scsc = pciide_pci_read(pc, pa, 0x8a); 4066 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 4067 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 4068 scsc = pciide_pci_read(pc, pa, 0x8a); 4069 if ((scsc & 0x30) == 0) 4070 drvp->UDMA_mode = 5; 4071 } 4072 mode |= 0x03 << (drive * 4); 4073 off = 0xac + chp->channel * 16 + drive * 2; 4074 val = pciide_pci_read(pc, pa, off) & ~0x3f; 4075 if (scsc & 0x30) 4076 val |= udma2_tbl[drvp->UDMA_mode]; 4077 else 4078 val |= udma_tbl[drvp->UDMA_mode]; 4079 pciide_pci_write(pc, pa, off, val); 4080 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4081 } else if (drvp->drive_flags & DRIVE_DMA) { 4082 mode |= 0x02 << (drive * 4); 4083 off = 0xa8 + chp->channel * 16 + drive * 2; 4084 val = dma_tbl[drvp->DMA_mode]; 4085 
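                        /*
                         * The dma_tbl/pio_tbl entries are 16-bit timing
                         * words; they are written a byte at a time, low
                         * byte at "off" and high byte at "off + 1".
                         */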
                        pciide_pci_write(pc, pa, off, val & 0xff);
                        pciide_pci_write(pc, pa, off + 1, val >> 8);
                        idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
                } else {
                        mode |= 0x01 << (drive * 4);
                        off = 0xa4 + chp->channel * 16 + drive * 2;
                        val = pio_tbl[drvp->PIO_mode];
                        pciide_pci_write(pc, pa, off, val & 0xff);
                        pciide_pci_write(pc, pa, off + 1, val >> 8);
                }
        }

        pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
        if (idedma_ctl != 0) {
                /* Add software bits in status register */
                bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
                    IDEDMA_CTL(chp->channel),
                    idedma_ctl);
        }
        pciide_print_modes(cp);
}

/*
 * When the Silicon Image 3112 retries a PCI memory read command,
 * it may retry it as a memory read multiple command under some
 * circumstances.  This can totally confuse some PCI controllers,
 * so ensure that it will never do this by making sure that the
 * Read Threshold (FIFO Read Request Control) fields of the FIFO
 * Valid Byte Count and Control registers for both channels (BA5
 * offset 0x40 and 0x44) are set to be at least as large as the
 * cacheline size register.
 */
void
sii_fixup_cacheline(struct pciide_softc *sc, struct pci_attach_args *pa)
{
        pcireg_t cls, reg40, reg44;

        cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
        cls = (cls >> PCI_CACHELINE_SHIFT) & PCI_CACHELINE_MASK;
        cls *= 4;
        if (cls > 224) {
                cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
                cls &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
                cls |= ((224/4) << PCI_CACHELINE_SHIFT);
                pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, cls);
                cls = 224;
        }
        if (cls < 32)
                cls = 32;
        cls = (cls + 31) / 32;
        reg40 = ba5_read_4(sc, 0x40);
        reg44 = ba5_read_4(sc, 0x44);
        if ((reg40 & 0x7) < cls)
                ba5_write_4(sc, 0x40, (reg40 & ~0x07) | cls);
        if ((reg44 & 0x7) < cls)
                ba5_write_4(sc, 0x44, (reg44 & ~0x07) | cls);
}

void
sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
        struct pciide_channel *cp;
        bus_size_t cmdsize, ctlsize;
        pcireg_t interface, scs_cmd, cfgctl;
        int channel;
        struct pciide_satalink *sl;

        /* Allocate memory for private data */
        sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO);
        sl = sc->sc_cookie;

        sc->chip_unmap = default_chip_unmap;

#define SII3112_RESET_BITS \
        (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \
        SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \
        SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET)

        /*
         * Reset everything and then unblock all of the interrupts.
4165 */ 4166 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4167 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4168 scs_cmd | SII3112_RESET_BITS); 4169 delay(50 * 1000); 4170 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4171 scs_cmd & SCS_CMD_BA5_EN); 4172 delay(50 * 1000); 4173 4174 if (scs_cmd & SCS_CMD_BA5_EN) { 4175 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4176 PCI_MAPREG_TYPE_MEM | 4177 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4178 &sl->ba5_st, &sl->ba5_sh, 4179 NULL, NULL, 0) != 0) 4180 printf(": unable to map BA5 register space\n"); 4181 else 4182 sl->ba5_en = 1; 4183 } else { 4184 cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 4185 SII3112_PCI_CFGCTL); 4186 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL, 4187 cfgctl | CFGCTL_BA5INDEN); 4188 } 4189 4190 printf(": DMA"); 4191 pciide_mapreg_dma(sc, pa); 4192 printf("\n"); 4193 4194 /* 4195 * Rev. <= 0x01 of the 3112 have a bug that can cause data 4196 * corruption if DMA transfers cross an 8K boundary. This is 4197 * apparently hard to tickle, but we'll go ahead and play it 4198 * safe. 4199 */ 4200 if (sc->sc_rev <= 0x01) { 4201 sc->sc_dma_maxsegsz = 8192; 4202 sc->sc_dma_boundary = 8192; 4203 } 4204 4205 sii_fixup_cacheline(sc, pa); 4206 4207 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4208 sc->sc_wdcdev.PIO_cap = 4; 4209 if (sc->sc_dma_ok) { 4210 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4211 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4212 sc->sc_wdcdev.irqack = pciide_irqack; 4213 sc->sc_wdcdev.DMA_cap = 2; 4214 sc->sc_wdcdev.UDMA_cap = 6; 4215 } 4216 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4217 4218 /* We can use SControl and SStatus to probe for drives. */ 4219 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4220 4221 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4222 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4223 4224 /* 4225 * The 3112 either identifies itself as a RAID storage device 4226 * or a Misc storage device. Fake up the interface bits for 4227 * what our driver expects. 
4228 */ 4229 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4230 interface = PCI_INTERFACE(pa->pa_class); 4231 } else { 4232 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4233 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4234 } 4235 4236 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4237 cp = &sc->pciide_channels[channel]; 4238 if (pciide_chansetup(sc, channel, interface) == 0) 4239 continue; 4240 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4241 pciide_pci_intr); 4242 if (cp->hw_ok == 0) 4243 continue; 4244 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4245 } 4246 } 4247 4248 void 4249 sii3112_setup_channel(struct channel_softc *chp) 4250 { 4251 struct ata_drive_datas *drvp; 4252 int drive; 4253 u_int32_t idedma_ctl, dtm; 4254 struct pciide_channel *cp = (struct pciide_channel *)chp; 4255 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4256 4257 /* setup DMA if needed */ 4258 pciide_channel_dma_setup(cp); 4259 4260 idedma_ctl = 0; 4261 dtm = 0; 4262 4263 for (drive = 0; drive < 2; drive++) { 4264 drvp = &chp->ch_drive[drive]; 4265 /* If no drive, skip */ 4266 if ((drvp->drive_flags & DRIVE) == 0) 4267 continue; 4268 if (drvp->drive_flags & DRIVE_UDMA) { 4269 /* use Ultra/DMA */ 4270 drvp->drive_flags &= ~DRIVE_DMA; 4271 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4272 dtm |= DTM_IDEx_DMA; 4273 } else if (drvp->drive_flags & DRIVE_DMA) { 4274 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4275 dtm |= DTM_IDEx_DMA; 4276 } else { 4277 dtm |= DTM_IDEx_PIO; 4278 } 4279 } 4280 4281 /* 4282 * Nothing to do to setup modes; it is meaningless in S-ATA 4283 * (but many S-ATA drives still want to get the SET_FEATURE 4284 * command). 4285 */ 4286 if (idedma_ctl != 0) { 4287 /* Add software bits in status register */ 4288 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 4289 } 4290 BA5_WRITE_4(sc, chp->channel, ba5_IDE_DTM, dtm); 4291 pciide_print_modes(cp); 4292 } 4293 4294 void 4295 sii3112_drv_probe(struct channel_softc *chp) 4296 { 4297 struct pciide_channel *cp = (struct pciide_channel *)chp; 4298 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4299 uint32_t scontrol, sstatus; 4300 uint8_t scnt, sn, cl, ch; 4301 int s; 4302 4303 /* 4304 * The 3112 is a 2-port part, and only has one drive per channel 4305 * (each port emulates a master drive). 4306 * 4307 * The 3114 is similar, but has 4 channels. 4308 */ 4309 4310 /* 4311 * Request communication initialization sequence, any speed. 4312 * Performing this is the equivalent of an ATA Reset. 4313 */ 4314 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 4315 4316 /* 4317 * XXX We don't yet support SATA power management; disable all 4318 * power management state transitions. 4319 */ 4320 scontrol |= SControl_IPM_NONE; 4321 4322 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4323 delay(50 * 1000); 4324 scontrol &= ~SControl_DET_INIT; 4325 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4326 delay(50 * 1000); 4327 4328 sstatus = BA5_READ_4(sc, chp->channel, ba5_SStatus); 4329 #if 0 4330 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 4331 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 4332 BA5_READ_4(sc, chp->channel, ba5_SControl)); 4333 #endif 4334 switch (sstatus & SStatus_DET_mask) { 4335 case SStatus_DET_NODEV: 4336 /* No device; be silent. 
*/ 4337 break; 4338 4339 case SStatus_DET_DEV_NE: 4340 printf("%s: port %d: device connected, but " 4341 "communication not established\n", 4342 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4343 break; 4344 4345 case SStatus_DET_OFFLINE: 4346 printf("%s: port %d: PHY offline\n", 4347 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4348 break; 4349 4350 case SStatus_DET_DEV: 4351 /* 4352 * XXX ATAPI detection doesn't currently work. Don't 4353 * XXX know why. But, it's not like the standard method 4354 * XXX can detect an ATAPI device connected via a SATA/PATA 4355 * XXX bridge, so at least this is no worse. --thorpej 4356 */ 4357 if (chp->_vtbl != NULL) 4358 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 4359 else 4360 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 4361 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 4362 delay(10); /* 400ns delay */ 4363 /* Save register contents. */ 4364 if (chp->_vtbl != NULL) { 4365 scnt = CHP_READ_REG(chp, wdr_seccnt); 4366 sn = CHP_READ_REG(chp, wdr_sector); 4367 cl = CHP_READ_REG(chp, wdr_cyl_lo); 4368 ch = CHP_READ_REG(chp, wdr_cyl_hi); 4369 } else { 4370 scnt = bus_space_read_1(chp->cmd_iot, 4371 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 4372 sn = bus_space_read_1(chp->cmd_iot, 4373 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 4374 cl = bus_space_read_1(chp->cmd_iot, 4375 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 4376 ch = bus_space_read_1(chp->cmd_iot, 4377 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 4378 } 4379 #if 0 4380 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 4381 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 4382 scnt, sn, cl, ch); 4383 #endif 4384 /* 4385 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 4386 * cases we get wrong values here, so ignore it. 4387 */ 4388 s = splbio(); 4389 if (cl == 0x14 && ch == 0xeb) 4390 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 4391 else 4392 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 4393 splx(s); 4394 4395 printf("%s: port %d: device present", 4396 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4397 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 4398 case 1: 4399 printf(", speed: 1.5Gb/s"); 4400 break; 4401 case 2: 4402 printf(", speed: 3.0Gb/s"); 4403 break; 4404 } 4405 printf("\n"); 4406 break; 4407 4408 default: 4409 printf("%s: port %d: unknown SStatus: 0x%08x\n", 4410 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 4411 } 4412 } 4413 4414 void 4415 sii3114_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4416 { 4417 struct pciide_channel *cp; 4418 pcireg_t scs_cmd; 4419 pci_intr_handle_t intrhandle; 4420 const char *intrstr; 4421 int channel; 4422 struct pciide_satalink *sl; 4423 4424 /* Allocate memory for private data */ 4425 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 4426 sl = sc->sc_cookie; 4427 4428 #define SII3114_RESET_BITS \ 4429 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4430 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4431 SCS_CMD_FF3_RESET | SCS_CMD_FF2_RESET | \ 4432 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET | \ 4433 SCS_CMD_IDE3_RESET | SCS_CMD_IDE2_RESET) 4434 4435 /* 4436 * Reset everything and then unblock all of the interrupts. 4437 */ 4438 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4439 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4440 scs_cmd | SII3114_RESET_BITS); 4441 delay(50 * 1000); 4442 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4443 scs_cmd & SCS_CMD_M66EN); 4444 delay(50 * 1000); 4445 4446 /* 4447 * On the 3114, the BA5 register space is always enabled. 
In 4448 * order to use the 3114 in any sane way, we must use this BA5 4449 * register space, and so we consider it an error if we cannot 4450 * map it. 4451 * 4452 * As a consequence of using BA5, our register mapping is different 4453 * from a normal PCI IDE controller's, and so we are unable to use 4454 * most of the common PCI IDE register mapping functions. 4455 */ 4456 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4457 PCI_MAPREG_TYPE_MEM | 4458 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4459 &sl->ba5_st, &sl->ba5_sh, 4460 NULL, NULL, 0) != 0) { 4461 printf(": unable to map BA5 register space\n"); 4462 return; 4463 } 4464 sl->ba5_en = 1; 4465 4466 /* 4467 * Set the Interrupt Steering bit in the IDEDMA_CMD register of 4468 * channel 2. This is required at all times for proper operation 4469 * when using the BA5 register space (otherwise interrupts from 4470 * all 4 channels won't work). 4471 */ 4472 BA5_WRITE_4(sc, 2, ba5_IDEDMA_CMD, IDEDMA_CMD_INT_STEER); 4473 4474 printf(": DMA"); 4475 sii3114_mapreg_dma(sc, pa); 4476 printf("\n"); 4477 4478 sii_fixup_cacheline(sc, pa); 4479 4480 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4481 sc->sc_wdcdev.PIO_cap = 4; 4482 if (sc->sc_dma_ok) { 4483 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4484 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4485 sc->sc_wdcdev.irqack = pciide_irqack; 4486 sc->sc_wdcdev.DMA_cap = 2; 4487 sc->sc_wdcdev.UDMA_cap = 6; 4488 } 4489 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4490 4491 /* We can use SControl and SStatus to probe for drives. */ 4492 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4493 4494 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4495 sc->sc_wdcdev.nchannels = 4; 4496 4497 /* Map and establish the interrupt handler. */ 4498 if (pci_intr_map(pa, &intrhandle) != 0) { 4499 printf("%s: couldn't map native-PCI interrupt\n", 4500 sc->sc_wdcdev.sc_dev.dv_xname); 4501 return; 4502 } 4503 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 4504 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 4505 /* XXX */ 4506 pciide_pci_intr, sc, 4507 sc->sc_wdcdev.sc_dev.dv_xname); 4508 if (sc->sc_pci_ih != NULL) { 4509 printf("%s: using %s for native-PCI interrupt\n", 4510 sc->sc_wdcdev.sc_dev.dv_xname, 4511 intrstr ? intrstr : "unknown interrupt"); 4512 } else { 4513 printf("%s: couldn't establish native-PCI interrupt", 4514 sc->sc_wdcdev.sc_dev.dv_xname); 4515 if (intrstr != NULL) 4516 printf(" at %s", intrstr); 4517 printf("\n"); 4518 return; 4519 } 4520 4521 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4522 cp = &sc->pciide_channels[channel]; 4523 if (sii3114_chansetup(sc, channel) == 0) 4524 continue; 4525 sii3114_mapchan(cp); 4526 if (cp->hw_ok == 0) 4527 continue; 4528 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4529 } 4530 } 4531 4532 void 4533 sii3114_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 4534 { 4535 int chan, reg; 4536 bus_size_t size; 4537 struct pciide_satalink *sl = sc->sc_cookie; 4538 4539 sc->sc_wdcdev.dma_arg = sc; 4540 sc->sc_wdcdev.dma_init = pciide_dma_init; 4541 sc->sc_wdcdev.dma_start = pciide_dma_start; 4542 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 4543 4544 /* 4545 * Slice off a subregion of BA5 for each of the channel's DMA 4546 * registers. 
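 * Each channel's bus-master register block lives at
 * satalink_ba5_regmap[chan].ba5_IDEDMA_CMD inside BA5; the loop below
 * carves a per-register handle out of that block.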
4547 */ 4548 4549 sc->sc_dma_iot = sl->ba5_st; 4550 for (chan = 0; chan < 4; chan++) { 4551 for (reg = 0; reg < IDEDMA_NREGS; reg++) { 4552 size = 4; 4553 if (size > (IDEDMA_SCH_OFFSET - reg)) 4554 size = IDEDMA_SCH_OFFSET - reg; 4555 if (bus_space_subregion(sl->ba5_st, 4556 sl->ba5_sh, 4557 satalink_ba5_regmap[chan].ba5_IDEDMA_CMD + reg, 4558 size, &sl->regs[chan].dma_iohs[reg]) != 0) { 4559 sc->sc_dma_ok = 0; 4560 printf(": can't subregion offset " 4561 "%lu size %lu", 4562 (u_long) satalink_ba5_regmap[ 4563 chan].ba5_IDEDMA_CMD + reg, 4564 (u_long) size); 4565 return; 4566 } 4567 } 4568 } 4569 4570 sc->sc_dmacmd_read = sii3114_dmacmd_read; 4571 sc->sc_dmacmd_write = sii3114_dmacmd_write; 4572 sc->sc_dmactl_read = sii3114_dmactl_read; 4573 sc->sc_dmactl_write = sii3114_dmactl_write; 4574 sc->sc_dmatbl_write = sii3114_dmatbl_write; 4575 4576 /* DMA registers all set up! */ 4577 sc->sc_dmat = pa->pa_dmat; 4578 sc->sc_dma_ok = 1; 4579 } 4580 4581 int 4582 sii3114_chansetup(struct pciide_softc *sc, int channel) 4583 { 4584 static const char *channel_names[] = { 4585 "port 0", 4586 "port 1", 4587 "port 2", 4588 "port 3", 4589 }; 4590 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4591 4592 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4593 4594 /* 4595 * We must always keep the Interrupt Steering bit set in channel 2's 4596 * IDEDMA_CMD register. 4597 */ 4598 if (channel == 2) 4599 cp->idedma_cmd = IDEDMA_CMD_INT_STEER; 4600 4601 cp->name = channel_names[channel]; 4602 cp->wdc_channel.channel = channel; 4603 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4604 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4605 if (cp->wdc_channel.ch_queue == NULL) { 4606 printf("%s %s channel: " 4607 "cannot allocate channel queue", 4608 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4609 return (0); 4610 } 4611 return (1); 4612 } 4613 4614 void 4615 sii3114_mapchan(struct pciide_channel *cp) 4616 { 4617 struct channel_softc *wdc_cp = &cp->wdc_channel; 4618 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4619 struct pciide_satalink *sl = sc->sc_cookie; 4620 int chan = wdc_cp->channel; 4621 int i; 4622 4623 cp->hw_ok = 0; 4624 cp->compat = 0; 4625 cp->ih = sc->sc_pci_ih; 4626 4627 sl->regs[chan].cmd_iot = sl->ba5_st; 4628 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4629 satalink_ba5_regmap[chan].ba5_IDE_TF0, 4630 9, &sl->regs[chan].cmd_baseioh) != 0) { 4631 printf("%s: couldn't subregion %s cmd base\n", 4632 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4633 return; 4634 } 4635 4636 sl->regs[chan].ctl_iot = sl->ba5_st; 4637 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4638 satalink_ba5_regmap[chan].ba5_IDE_TF8, 4639 1, &cp->ctl_baseioh) != 0) { 4640 printf("%s: couldn't subregion %s ctl base\n", 4641 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4642 return; 4643 } 4644 sl->regs[chan].ctl_ioh = cp->ctl_baseioh; 4645 4646 for (i = 0; i < WDC_NREG; i++) { 4647 if (bus_space_subregion(sl->regs[chan].cmd_iot, 4648 sl->regs[chan].cmd_baseioh, 4649 i, i == 0 ? 
4 : 1, 4650 &sl->regs[chan].cmd_iohs[i]) != 0) { 4651 printf("%s: couldn't subregion %s channel " 4652 "cmd regs\n", 4653 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4654 return; 4655 } 4656 } 4657 sl->regs[chan].cmd_iohs[wdr_status & _WDC_REGMASK] = 4658 sl->regs[chan].cmd_iohs[wdr_command & _WDC_REGMASK]; 4659 sl->regs[chan].cmd_iohs[wdr_features & _WDC_REGMASK] = 4660 sl->regs[chan].cmd_iohs[wdr_error & _WDC_REGMASK]; 4661 wdc_cp->data32iot = wdc_cp->cmd_iot = sl->regs[chan].cmd_iot; 4662 wdc_cp->data32ioh = wdc_cp->cmd_ioh = sl->regs[chan].cmd_iohs[0]; 4663 wdc_cp->_vtbl = &wdc_sii3114_vtbl; 4664 wdcattach(wdc_cp); 4665 cp->hw_ok = 1; 4666 } 4667 4668 u_int8_t 4669 sii3114_read_reg(struct channel_softc *chp, enum wdc_regs reg) 4670 { 4671 struct pciide_channel *cp = (struct pciide_channel *)chp; 4672 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4673 struct pciide_satalink *sl = sc->sc_cookie; 4674 4675 if (reg & _WDC_AUX) 4676 return (bus_space_read_1(sl->regs[chp->channel].ctl_iot, 4677 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 4678 else 4679 return (bus_space_read_1(sl->regs[chp->channel].cmd_iot, 4680 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0)); 4681 } 4682 4683 void 4684 sii3114_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 4685 { 4686 struct pciide_channel *cp = (struct pciide_channel *)chp; 4687 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4688 struct pciide_satalink *sl = sc->sc_cookie; 4689 4690 if (reg & _WDC_AUX) 4691 bus_space_write_1(sl->regs[chp->channel].ctl_iot, 4692 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 4693 else 4694 bus_space_write_1(sl->regs[chp->channel].cmd_iot, 4695 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 4696 0, val); 4697 } 4698 4699 u_int8_t 4700 sii3114_dmacmd_read(struct pciide_softc *sc, int chan) 4701 { 4702 struct pciide_satalink *sl = sc->sc_cookie; 4703 4704 return (bus_space_read_1(sc->sc_dma_iot, 4705 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0)); 4706 } 4707 4708 void 4709 sii3114_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 4710 { 4711 struct pciide_satalink *sl = sc->sc_cookie; 4712 4713 bus_space_write_1(sc->sc_dma_iot, 4714 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, val); 4715 } 4716 4717 u_int8_t 4718 sii3114_dmactl_read(struct pciide_softc *sc, int chan) 4719 { 4720 struct pciide_satalink *sl = sc->sc_cookie; 4721 4722 return (bus_space_read_1(sc->sc_dma_iot, 4723 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0)); 4724 } 4725 4726 void 4727 sii3114_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 4728 { 4729 struct pciide_satalink *sl = sc->sc_cookie; 4730 4731 bus_space_write_1(sc->sc_dma_iot, 4732 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0, val); 4733 } 4734 4735 void 4736 sii3114_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 4737 { 4738 struct pciide_satalink *sl = sc->sc_cookie; 4739 4740 bus_space_write_4(sc->sc_dma_iot, 4741 sl->regs[chan].dma_iohs[IDEDMA_TBL(0)], 0, val); 4742 } 4743 4744 void 4745 cy693_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4746 { 4747 struct pciide_channel *cp; 4748 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4749 bus_size_t cmdsize, ctlsize; 4750 struct pciide_cy *cy; 4751 4752 /* Allocate memory for private data */ 4753 sc->sc_cookie = malloc(sizeof(*cy), M_DEVBUF, M_NOWAIT | M_ZERO); 4754 cy = sc->sc_cookie; 4755 4756 /* 4757 * this chip has 2 PCI IDE functions, one for primary and one for 4758 * secondary. 
So we need to call pciide_mapregs_compat() with 4759 * the real channel 4760 */ 4761 if (pa->pa_function == 1) { 4762 cy->cy_compatchan = 0; 4763 } else if (pa->pa_function == 2) { 4764 cy->cy_compatchan = 1; 4765 } else { 4766 printf(": unexpected PCI function %d\n", pa->pa_function); 4767 return; 4768 } 4769 4770 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 4771 printf(": DMA"); 4772 pciide_mapreg_dma(sc, pa); 4773 } else { 4774 printf(": no DMA"); 4775 sc->sc_dma_ok = 0; 4776 } 4777 4778 cy->cy_handle = cy82c693_init(pa->pa_iot); 4779 if (cy->cy_handle == NULL) { 4780 printf(", (unable to map ctl registers)"); 4781 sc->sc_dma_ok = 0; 4782 } 4783 4784 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4785 WDC_CAPABILITY_MODE; 4786 if (sc->sc_dma_ok) { 4787 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4788 sc->sc_wdcdev.irqack = pciide_irqack; 4789 } 4790 sc->sc_wdcdev.PIO_cap = 4; 4791 sc->sc_wdcdev.DMA_cap = 2; 4792 sc->sc_wdcdev.set_modes = cy693_setup_channel; 4793 4794 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4795 sc->sc_wdcdev.nchannels = 1; 4796 4797 /* Only one channel for this chip; if we are here it's enabled */ 4798 cp = &sc->pciide_channels[0]; 4799 sc->wdc_chanarray[0] = &cp->wdc_channel; 4800 cp->name = PCIIDE_CHANNEL_NAME(0); 4801 cp->wdc_channel.channel = 0; 4802 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4803 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4804 if (cp->wdc_channel.ch_queue == NULL) { 4805 printf(": cannot allocate channel queue\n"); 4806 return; 4807 } 4808 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 4809 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 4810 "configured" : "wired"); 4811 if (interface & PCIIDE_INTERFACE_PCI(0)) { 4812 printf("native-PCI\n"); 4813 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 4814 pciide_pci_intr); 4815 } else { 4816 printf("compatibility\n"); 4817 cp->hw_ok = pciide_mapregs_compat(pa, cp, cy->cy_compatchan, 4818 &cmdsize, &ctlsize); 4819 } 4820 4821 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4822 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4823 pciide_map_compat_intr(pa, cp, cy->cy_compatchan, interface); 4824 if (cp->hw_ok == 0) 4825 return; 4826 wdcattach(&cp->wdc_channel); 4827 if (pciide_chan_candisable(cp)) { 4828 pci_conf_write(sc->sc_pc, sc->sc_tag, 4829 PCI_COMMAND_STATUS_REG, 0); 4830 } 4831 if (cp->hw_ok == 0) { 4832 pciide_unmap_compat_intr(pa, cp, cy->cy_compatchan, 4833 interface); 4834 return; 4835 } 4836 4837 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 4838 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4839 cy693_setup_channel(&cp->wdc_channel); 4840 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 4841 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4842 } 4843 4844 void 4845 cy693_setup_channel(struct channel_softc *chp) 4846 { 4847 struct ata_drive_datas *drvp; 4848 int drive; 4849 u_int32_t cy_cmd_ctrl; 4850 u_int32_t idedma_ctl; 4851 struct pciide_channel *cp = (struct pciide_channel *)chp; 4852 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4853 int dma_mode = -1; 4854 struct pciide_cy *cy = sc->sc_cookie; 4855 4856 cy_cmd_ctrl = idedma_ctl = 0; 4857 4858 /* setup DMA if needed */ 4859 pciide_channel_dma_setup(cp); 4860 4861 for (drive = 0; drive < 2; drive++) { 4862 drvp = &chp->ch_drive[drive]; 4863 /* If no drive, skip */ 4864 if ((drvp->drive_flags & DRIVE) == 0) 4865 continue; 4866 /* add timing values, setup DMA if needed */ 4867 if 
(drvp->drive_flags & DRIVE_DMA) { 4868 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4869 /* use Multiword DMA */ 4870 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 4871 dma_mode = drvp->DMA_mode; 4872 } 4873 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4874 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 4875 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4876 CY_CMD_CTRL_IOW_REC_OFF(drive)); 4877 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4878 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 4879 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4880 CY_CMD_CTRL_IOR_REC_OFF(drive)); 4881 } 4882 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 4883 chp->ch_drive[0].DMA_mode = dma_mode; 4884 chp->ch_drive[1].DMA_mode = dma_mode; 4885 4886 if (dma_mode == -1) 4887 dma_mode = 0; 4888 4889 if (cy->cy_handle != NULL) { 4890 /* Note: `multiple' is implied. */ 4891 cy82c693_write(cy->cy_handle, 4892 (cy->cy_compatchan == 0) ? 4893 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 4894 } 4895 4896 pciide_print_modes(cp); 4897 4898 if (idedma_ctl != 0) { 4899 /* Add software bits in status register */ 4900 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4901 IDEDMA_CTL(chp->channel), idedma_ctl); 4902 } 4903 } 4904 4905 static struct sis_hostbr_type { 4906 u_int16_t id; 4907 u_int8_t rev; 4908 u_int8_t udma_mode; 4909 char *name; 4910 u_int8_t type; 4911 #define SIS_TYPE_NOUDMA 0 4912 #define SIS_TYPE_66 1 4913 #define SIS_TYPE_100OLD 2 4914 #define SIS_TYPE_100NEW 3 4915 #define SIS_TYPE_133OLD 4 4916 #define SIS_TYPE_133NEW 5 4917 #define SIS_TYPE_SOUTH 6 4918 } sis_hostbr_type[] = { 4919 /* Most infos here are from sos@freebsd.org */ 4920 {PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66}, 4921 #if 0 4922 /* 4923 * controllers associated to a rev 0x2 530 Host to PCI Bridge 4924 * have problems with UDMA (info provided by Christos) 4925 */ 4926 {PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA}, 4927 #endif 4928 {PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66}, 4929 {PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66}, 4930 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66}, 4931 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66}, 4932 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW}, 4933 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW}, 4934 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW}, 4935 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH}, 4936 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH}, 4937 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH}, 4938 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH}, 4939 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH}, 4940 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH}, 4941 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH}, 4942 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH}, 4943 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH}, 4944 {PCI_PRODUCT_SIS_661, 0x00, 6, "661", SIS_TYPE_SOUTH}, 4945 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD}, 4946 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW}, 4947 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW}, 4948 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH}, 4949 {PCI_PRODUCT_SIS_741, 0x00, 6, "741", SIS_TYPE_SOUTH}, 4950 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW}, 4951 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH}, 4952 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH}, 4953 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH}, 4954 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", 
    SIS_TYPE_SOUTH},
        {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
        {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
        {PCI_PRODUCT_SIS_760, 0x00, 6, "760", SIS_TYPE_SOUTH},
        /*
         * From sos@freebsd.org: the 0x961 ID will never be found in the
         * real world
         * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
         */
        {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
        {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
        {PCI_PRODUCT_SIS_964, 0x00, 6, "964", SIS_TYPE_133NEW},
        {PCI_PRODUCT_SIS_965, 0x00, 6, "965", SIS_TYPE_133NEW}
};

static struct sis_hostbr_type *sis_hostbr_type_match;

int
sis_hostbr_match(struct pci_attach_args *pa)
{
        int i;

        if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
                return (0);
        sis_hostbr_type_match = NULL;
        for (i = 0;
            i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
            i++) {
                if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
                    PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
                        sis_hostbr_type_match = &sis_hostbr_type[i];
        }
        return (sis_hostbr_type_match != NULL);
}

int
sis_south_match(struct pci_attach_args *pa)
{
        return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
            PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
            PCI_REVISION(pa->pa_class) >= 0x10);
}

void
sis_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
        struct pciide_channel *cp;
        int channel;
        u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
        pcireg_t interface = PCI_INTERFACE(pa->pa_class);
        int rev = sc->sc_rev;
        bus_size_t cmdsize, ctlsize;
        struct pciide_sis *sis;

        /* Allocate memory for private data */
        sc->sc_cookie = malloc(sizeof(*sis), M_DEVBUF, M_NOWAIT | M_ZERO);
        sis = sc->sc_cookie;

        pci_find_device(NULL, sis_hostbr_match);

        if (sis_hostbr_type_match) {
                if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
                        pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
                            pciide_pci_read(sc->sc_pc, sc->sc_tag,
                            SIS_REG_57) & 0x7f);
                        if (sc->sc_pp->ide_product == PCI_PRODUCT_SIS_5518) {
                                sis->sis_type = SIS_TYPE_133NEW;
                                sc->sc_wdcdev.UDMA_cap =
                                    sis_hostbr_type_match->udma_mode;
                        } else {
                                if (pci_find_device(NULL, sis_south_match)) {
                                        sis->sis_type = SIS_TYPE_133OLD;
                                        sc->sc_wdcdev.UDMA_cap =
                                            sis_hostbr_type_match->udma_mode;
                                } else {
                                        sis->sis_type = SIS_TYPE_100NEW;
                                        sc->sc_wdcdev.UDMA_cap =
                                            sis_hostbr_type_match->udma_mode;
                                }
                        }
                } else {
                        sis->sis_type = sis_hostbr_type_match->type;
                        sc->sc_wdcdev.UDMA_cap =
                            sis_hostbr_type_match->udma_mode;
                }
                printf(": %s", sis_hostbr_type_match->name);
        } else {
                printf(": 5597/5598");
                if (rev >= 0xd0) {
                        sc->sc_wdcdev.UDMA_cap = 2;
                        sis->sis_type = SIS_TYPE_66;
                } else {
                        sc->sc_wdcdev.UDMA_cap = 0;
                        sis->sis_type = SIS_TYPE_NOUDMA;
                }
        }

        printf(": DMA");
        pciide_mapreg_dma(sc, pa);

        sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
            WDC_CAPABILITY_MODE;
        if (sc->sc_dma_ok) {
                sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
                sc->sc_wdcdev.irqack = pciide_irqack;
                if (sis->sis_type >= SIS_TYPE_66)
                        sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
        }

        sc->sc_wdcdev.PIO_cap = 4;
        sc->sc_wdcdev.DMA_cap = 2;
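        /*
         * Illustrative, disabled sketch: at this point sis->sis_type and
         * the UDMA capability have been resolved from the host bridge
         * probe above and could be traced with the driver's DEBUG_PROBE
         * machinery.
         */
#if 0
        WDCDEBUG_PRINT(("sis_chip_map: sis_type=%d UDMA_cap=%d\n",
            sis->sis_type, sc->sc_wdcdev.UDMA_cap), DEBUG_PROBE);
#endif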
5065 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5066 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5067 switch (sis->sis_type) { 5068 case SIS_TYPE_NOUDMA: 5069 case SIS_TYPE_66: 5070 case SIS_TYPE_100OLD: 5071 sc->sc_wdcdev.set_modes = sis_setup_channel; 5072 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 5073 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 5074 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC); 5075 break; 5076 case SIS_TYPE_100NEW: 5077 case SIS_TYPE_133OLD: 5078 sc->sc_wdcdev.set_modes = sis_setup_channel; 5079 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49, 5080 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01); 5081 break; 5082 case SIS_TYPE_133NEW: 5083 sc->sc_wdcdev.set_modes = sis96x_setup_channel; 5084 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 5085 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 5086 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 5087 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 5088 break; 5089 } 5090 5091 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5092 5093 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5094 cp = &sc->pciide_channels[channel]; 5095 if (pciide_chansetup(sc, channel, interface) == 0) 5096 continue; 5097 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 5098 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 5099 printf("%s: %s ignored (disabled)\n", 5100 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5101 continue; 5102 } 5103 pciide_map_compat_intr(pa, cp, channel, interface); 5104 if (cp->hw_ok == 0) 5105 continue; 5106 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5107 pciide_pci_intr); 5108 if (cp->hw_ok == 0) { 5109 pciide_unmap_compat_intr(pa, cp, channel, interface); 5110 continue; 5111 } 5112 if (pciide_chan_candisable(cp)) { 5113 if (channel == 0) 5114 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 5115 else 5116 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 5117 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 5118 sis_ctr0); 5119 } 5120 if (cp->hw_ok == 0) { 5121 pciide_unmap_compat_intr(pa, cp, channel, interface); 5122 continue; 5123 } 5124 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5125 } 5126 } 5127 5128 void 5129 sis96x_setup_channel(struct channel_softc *chp) 5130 { 5131 struct ata_drive_datas *drvp; 5132 int drive; 5133 u_int32_t sis_tim; 5134 u_int32_t idedma_ctl; 5135 int regtim; 5136 struct pciide_channel *cp = (struct pciide_channel *)chp; 5137 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5138 5139 sis_tim = 0; 5140 idedma_ctl = 0; 5141 /* setup DMA if needed */ 5142 pciide_channel_dma_setup(cp); 5143 5144 for (drive = 0; drive < 2; drive++) { 5145 regtim = SIS_TIM133( 5146 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 5147 chp->channel, drive); 5148 drvp = &chp->ch_drive[drive]; 5149 /* If no drive, skip */ 5150 if ((drvp->drive_flags & DRIVE) == 0) 5151 continue; 5152 /* add timing values, setup DMA if needed */ 5153 if (drvp->drive_flags & DRIVE_UDMA) { 5154 /* use Ultra/DMA */ 5155 drvp->drive_flags &= ~DRIVE_DMA; 5156 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5157 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 5158 if (drvp->UDMA_mode > 2) 5159 drvp->UDMA_mode = 2; 5160 } 5161 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 5162 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5163 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5164 } else if (drvp->drive_flags & DRIVE_DMA) { 5165 /* 5166 * use Multiword DMA 5167 * Timings will be used for both PIO and DMA, 5168 * so adjust DMA 
mode if needed 5169 */ 5170 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5171 drvp->PIO_mode = drvp->DMA_mode + 2; 5172 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5173 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 5174 drvp->PIO_mode - 2 : 0; 5175 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 5176 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5177 } else { 5178 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5179 } 5180 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 5181 "channel %d drive %d: 0x%x (reg 0x%x)\n", 5182 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 5183 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 5184 } 5185 if (idedma_ctl != 0) { 5186 /* Add software bits in status register */ 5187 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5188 IDEDMA_CTL(chp->channel), idedma_ctl); 5189 } 5190 pciide_print_modes(cp); 5191 } 5192 5193 void 5194 sis_setup_channel(struct channel_softc *chp) 5195 { 5196 struct ata_drive_datas *drvp; 5197 int drive; 5198 u_int32_t sis_tim; 5199 u_int32_t idedma_ctl; 5200 struct pciide_channel *cp = (struct pciide_channel *)chp; 5201 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5202 struct pciide_sis *sis = sc->sc_cookie; 5203 5204 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 5205 "channel %d 0x%x\n", chp->channel, 5206 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 5207 DEBUG_PROBE); 5208 sis_tim = 0; 5209 idedma_ctl = 0; 5210 /* setup DMA if needed */ 5211 pciide_channel_dma_setup(cp); 5212 5213 for (drive = 0; drive < 2; drive++) { 5214 drvp = &chp->ch_drive[drive]; 5215 /* If no drive, skip */ 5216 if ((drvp->drive_flags & DRIVE) == 0) 5217 continue; 5218 /* add timing values, setup DMA if needed */ 5219 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5220 (drvp->drive_flags & DRIVE_UDMA) == 0) 5221 goto pio; 5222 5223 if (drvp->drive_flags & DRIVE_UDMA) { 5224 /* use Ultra/DMA */ 5225 drvp->drive_flags &= ~DRIVE_DMA; 5226 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5227 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 5228 if (drvp->UDMA_mode > 2) 5229 drvp->UDMA_mode = 2; 5230 } 5231 switch (sis->sis_type) { 5232 case SIS_TYPE_66: 5233 case SIS_TYPE_100OLD: 5234 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 5235 SIS_TIM66_UDMA_TIME_OFF(drive); 5236 break; 5237 case SIS_TYPE_100NEW: 5238 sis_tim |= 5239 sis_udma100new_tim[drvp->UDMA_mode] << 5240 SIS_TIM100_UDMA_TIME_OFF(drive); 5241 break; 5242 case SIS_TYPE_133OLD: 5243 sis_tim |= 5244 sis_udma133old_tim[drvp->UDMA_mode] << 5245 SIS_TIM100_UDMA_TIME_OFF(drive); 5246 break; 5247 default: 5248 printf("unknown SiS IDE type %d\n", 5249 sis->sis_type); 5250 } 5251 } else { 5252 /* 5253 * use Multiword DMA 5254 * Timings will be used for both PIO and DMA, 5255 * so adjust DMA mode if needed 5256 */ 5257 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5258 drvp->PIO_mode = drvp->DMA_mode + 2; 5259 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5260 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5261 drvp->PIO_mode - 2 : 0; 5262 if (drvp->DMA_mode == 0) 5263 drvp->PIO_mode = 0; 5264 } 5265 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5266 pio: switch (sis->sis_type) { 5267 case SIS_TYPE_NOUDMA: 5268 case SIS_TYPE_66: 5269 case SIS_TYPE_100OLD: 5270 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5271 SIS_TIM66_ACT_OFF(drive); 5272 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5273 SIS_TIM66_REC_OFF(drive); 5274 break; 5275 case SIS_TYPE_100NEW: 5276 case SIS_TYPE_133OLD: 5277 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5278 SIS_TIM100_ACT_OFF(drive); 5279 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5280 SIS_TIM100_REC_OFF(drive); 5281 break; 5282 default: 5283 printf("unknown SiS IDE type %d\n", 5284 sis->sis_type); 5285 } 5286 } 5287 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 5288 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 5289 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 5290 if (idedma_ctl != 0) { 5291 /* Add software bits in status register */ 5292 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5293 IDEDMA_CTL(chp->channel), idedma_ctl); 5294 } 5295 pciide_print_modes(cp); 5296 } 5297 5298 void 5299 natsemi_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5300 { 5301 struct pciide_channel *cp; 5302 int channel; 5303 pcireg_t interface, ctl; 5304 bus_size_t cmdsize, ctlsize; 5305 5306 printf(": DMA"); 5307 pciide_mapreg_dma(sc, pa); 5308 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 5309 5310 if (sc->sc_dma_ok) { 5311 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5312 sc->sc_wdcdev.irqack = natsemi_irqack; 5313 } 5314 5315 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 5316 5317 /* 5318 * Mask off interrupts from both channels, appropriate channel(s) 5319 * will be unmasked later. 5320 */ 5321 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5322 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 5323 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 5324 5325 sc->sc_wdcdev.PIO_cap = 4; 5326 sc->sc_wdcdev.DMA_cap = 2; 5327 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 5328 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5329 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5330 5331 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5332 PCI_CLASS_REG)); 5333 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 5334 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5335 5336 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
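 * (Channels left in compatibility mode interrupt through the legacy
 * ISA IRQs instead, so INTA stays masked in that case.)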
*/ 5337 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 5338 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 5339 ctl &= ~NATSEMI_CTRL1_INTAMASK; 5340 else 5341 ctl |= NATSEMI_CTRL1_INTAMASK; 5342 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 5343 5344 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5345 cp = &sc->pciide_channels[channel]; 5346 if (pciide_chansetup(sc, channel, interface) == 0) 5347 continue; 5348 5349 pciide_map_compat_intr(pa, cp, channel, interface); 5350 if (cp->hw_ok == 0) 5351 continue; 5352 5353 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5354 natsemi_pci_intr); 5355 if (cp->hw_ok == 0) { 5356 pciide_unmap_compat_intr(pa, cp, channel, interface); 5357 continue; 5358 } 5359 natsemi_setup_channel(&cp->wdc_channel); 5360 } 5361 } 5362 5363 void 5364 natsemi_setup_channel(struct channel_softc *chp) 5365 { 5366 struct ata_drive_datas *drvp; 5367 int drive, ndrives = 0; 5368 u_int32_t idedma_ctl = 0; 5369 struct pciide_channel *cp = (struct pciide_channel *)chp; 5370 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5371 u_int8_t tim; 5372 5373 /* setup DMA if needed */ 5374 pciide_channel_dma_setup(cp); 5375 5376 for (drive = 0; drive < 2; drive++) { 5377 drvp = &chp->ch_drive[drive]; 5378 /* If no drive, skip */ 5379 if ((drvp->drive_flags & DRIVE) == 0) 5380 continue; 5381 5382 ndrives++; 5383 /* add timing values, setup DMA if needed */ 5384 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 5385 tim = natsemi_pio_pulse[drvp->PIO_mode] | 5386 (natsemi_pio_recover[drvp->PIO_mode] << 4); 5387 } else { 5388 /* 5389 * use Multiword DMA 5390 * Timings will be used for both PIO and DMA, 5391 * so adjust DMA mode if needed 5392 */ 5393 if (drvp->PIO_mode >= 3 && 5394 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5395 drvp->DMA_mode = drvp->PIO_mode - 2; 5396 } 5397 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5398 tim = natsemi_dma_pulse[drvp->DMA_mode] | 5399 (natsemi_dma_recover[drvp->DMA_mode] << 4); 5400 } 5401 5402 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5403 NATSEMI_RTREG(chp->channel, drive), tim); 5404 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5405 NATSEMI_WTREG(chp->channel, drive), tim); 5406 } 5407 if (idedma_ctl != 0) { 5408 /* Add software bits in status register */ 5409 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5410 IDEDMA_CTL(chp->channel), idedma_ctl); 5411 } 5412 if (ndrives > 0) { 5413 /* Unmask the channel if at least one drive is found */ 5414 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5415 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 5416 ~(NATSEMI_CHMASK(chp->channel))); 5417 } 5418 5419 pciide_print_modes(cp); 5420 5421 /* Go ahead and ack interrupts generated during probe. 
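 * (Reading IDEDMA_CTL and writing the value straight back clears any
 * interrupt/error bits that are set, since those status bits are
 * write-one-to-clear.)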
*/ 5422 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5423 IDEDMA_CTL(chp->channel), 5424 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5425 IDEDMA_CTL(chp->channel))); 5426 } 5427 5428 void 5429 natsemi_irqack(struct channel_softc *chp) 5430 { 5431 struct pciide_channel *cp = (struct pciide_channel *)chp; 5432 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5433 u_int8_t clr; 5434 5435 /* The "clear" bits are in the wrong register *sigh* */ 5436 clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5437 IDEDMA_CMD(chp->channel)); 5438 clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5439 IDEDMA_CTL(chp->channel)) & 5440 (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR); 5441 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5442 IDEDMA_CMD(chp->channel), clr); 5443 } 5444 5445 int 5446 natsemi_pci_intr(void *arg) 5447 { 5448 struct pciide_softc *sc = arg; 5449 struct pciide_channel *cp; 5450 struct channel_softc *wdc_cp; 5451 int i, rv, crv; 5452 u_int8_t msk; 5453 5454 rv = 0; 5455 msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2); 5456 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5457 cp = &sc->pciide_channels[i]; 5458 wdc_cp = &cp->wdc_channel; 5459 5460 /* If a compat channel skip. */ 5461 if (cp->compat) 5462 continue; 5463 5464 /* If this channel is masked, skip it. */ 5465 if (msk & NATSEMI_CHMASK(i)) 5466 continue; 5467 5468 if (pciide_intr_flag(cp) == 0) 5469 continue; 5470 5471 crv = wdcintr(wdc_cp); 5472 if (crv == 0) 5473 ; /* leave rv alone */ 5474 else if (crv == 1) 5475 rv = 1; /* claim the intr */ 5476 else if (rv == 0) /* crv should be -1 in this case */ 5477 rv = crv; /* if we've done no better, take it */ 5478 } 5479 return (rv); 5480 } 5481 5482 void 5483 ns_scx200_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5484 { 5485 struct pciide_channel *cp; 5486 int channel; 5487 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 5488 bus_size_t cmdsize, ctlsize; 5489 5490 printf(": DMA"); 5491 pciide_mapreg_dma(sc, pa); 5492 5493 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5494 WDC_CAPABILITY_MODE; 5495 if (sc->sc_dma_ok) { 5496 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5497 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5498 sc->sc_wdcdev.irqack = pciide_irqack; 5499 } 5500 sc->sc_wdcdev.PIO_cap = 4; 5501 sc->sc_wdcdev.DMA_cap = 2; 5502 sc->sc_wdcdev.UDMA_cap = 2; 5503 5504 sc->sc_wdcdev.set_modes = ns_scx200_setup_channel; 5505 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5506 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5507 5508 /* 5509 * Soekris net4801 errata 0003: 5510 * 5511 * The SC1100 built in busmaster IDE controller is pretty standard, 5512 * but have two bugs: data transfers need to be dword aligned and 5513 * it cannot do an exact 64Kbyte data transfer. 5514 * 5515 * Assume that reducing maximum segment size by one page 5516 * will be enough, and restrict boundary too for extra certainty. 5517 */ 5518 if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SCx200_IDE) { 5519 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE; 5520 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE; 5521 } 5522 5523 /* 5524 * This chip seems to be unable to do one-sector transfers 5525 * using DMA. 
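 * The WDC_QUIRK_NOSHORTDMA quirk set below is expected to make the
 * wdc layer fall back to PIO for those transfers.  Together with the
 * reduced segment size and boundary above (e.g. 64KB - 4KB = 60KB
 * per segment on a 4KB-page machine) this should cover both SC1100
 * errata.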
5526 */ 5527 sc->sc_wdcdev.quirks = WDC_QUIRK_NOSHORTDMA; 5528 5529 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5530 5531 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5532 cp = &sc->pciide_channels[channel]; 5533 if (pciide_chansetup(sc, channel, interface) == 0) 5534 continue; 5535 pciide_map_compat_intr(pa, cp, channel, interface); 5536 if (cp->hw_ok == 0) 5537 continue; 5538 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5539 pciide_pci_intr); 5540 if (cp->hw_ok == 0) { 5541 pciide_unmap_compat_intr(pa, cp, channel, interface); 5542 continue; 5543 } 5544 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5545 } 5546 } 5547 5548 void 5549 ns_scx200_setup_channel(struct channel_softc *chp) 5550 { 5551 struct ata_drive_datas *drvp; 5552 int drive, mode; 5553 u_int32_t idedma_ctl; 5554 struct pciide_channel *cp = (struct pciide_channel*)chp; 5555 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5556 int channel = chp->channel; 5557 int pioformat; 5558 pcireg_t piotim, dmatim; 5559 5560 /* Setup DMA if needed */ 5561 pciide_channel_dma_setup(cp); 5562 5563 idedma_ctl = 0; 5564 5565 pioformat = (pci_conf_read(sc->sc_pc, sc->sc_tag, 5566 SCx200_TIM_DMA(0, 0)) >> SCx200_PIOFORMAT_SHIFT) & 0x01; 5567 WDCDEBUG_PRINT(("%s: pio format %d\n", __func__, pioformat), 5568 DEBUG_PROBE); 5569 5570 /* Per channel settings */ 5571 for (drive = 0; drive < 2; drive++) { 5572 drvp = &chp->ch_drive[drive]; 5573 5574 /* If no drive, skip */ 5575 if ((drvp->drive_flags & DRIVE) == 0) 5576 continue; 5577 5578 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5579 SCx200_TIM_PIO(channel, drive)); 5580 dmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5581 SCx200_TIM_DMA(channel, drive)); 5582 WDCDEBUG_PRINT(("%s:%d:%d: piotim=0x%x, dmatim=0x%x\n", 5583 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5584 piotim, dmatim), DEBUG_PROBE); 5585 5586 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 5587 (drvp->drive_flags & DRIVE_UDMA) != 0) { 5588 /* Setup UltraDMA mode */ 5589 drvp->drive_flags &= ~DRIVE_DMA; 5590 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5591 dmatim = scx200_udma33[drvp->UDMA_mode]; 5592 mode = drvp->PIO_mode; 5593 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 5594 (drvp->drive_flags & DRIVE_DMA) != 0) { 5595 /* Setup multiword DMA mode */ 5596 drvp->drive_flags &= ~DRIVE_UDMA; 5597 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5598 dmatim = scx200_dma33[drvp->DMA_mode]; 5599 5600 /* mode = min(pio, dma + 2) */ 5601 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 5602 mode = drvp->PIO_mode; 5603 else 5604 mode = drvp->DMA_mode + 2; 5605 } else { 5606 mode = drvp->PIO_mode; 5607 } 5608 5609 /* Setup PIO mode */ 5610 drvp->PIO_mode = mode; 5611 if (mode < 2) 5612 drvp->DMA_mode = 0; 5613 else 5614 drvp->DMA_mode = mode - 2; 5615 5616 piotim = scx200_pio33[pioformat][drvp->PIO_mode]; 5617 5618 WDCDEBUG_PRINT(("%s:%d:%d: new piotim=0x%x, dmatim=0x%x\n", 5619 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5620 piotim, dmatim), DEBUG_PROBE); 5621 5622 pci_conf_write(sc->sc_pc, sc->sc_tag, 5623 SCx200_TIM_PIO(channel, drive), piotim); 5624 pci_conf_write(sc->sc_pc, sc->sc_tag, 5625 SCx200_TIM_DMA(channel, drive), dmatim); 5626 } 5627 5628 if (idedma_ctl != 0) { 5629 /* Add software bits in status register */ 5630 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5631 IDEDMA_CTL(channel), idedma_ctl); 5632 } 5633 5634 pciide_print_modes(cp); 5635 } 5636 5637 void 5638 acer_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5639 { 5640 struct 
pciide_channel *cp; 5641 int channel; 5642 pcireg_t cr, interface; 5643 bus_size_t cmdsize, ctlsize; 5644 int rev = sc->sc_rev; 5645 5646 printf(": DMA"); 5647 pciide_mapreg_dma(sc, pa); 5648 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5649 WDC_CAPABILITY_MODE; 5650 5651 if (sc->sc_dma_ok) { 5652 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 5653 if (rev >= 0x20) { 5654 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5655 if (rev >= 0xC4) 5656 sc->sc_wdcdev.UDMA_cap = 5; 5657 else if (rev >= 0xC2) 5658 sc->sc_wdcdev.UDMA_cap = 4; 5659 else 5660 sc->sc_wdcdev.UDMA_cap = 2; 5661 } 5662 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5663 sc->sc_wdcdev.irqack = pciide_irqack; 5664 if (rev <= 0xC4) 5665 sc->sc_wdcdev.dma_init = acer_dma_init; 5666 } 5667 5668 sc->sc_wdcdev.PIO_cap = 4; 5669 sc->sc_wdcdev.DMA_cap = 2; 5670 sc->sc_wdcdev.set_modes = acer_setup_channel; 5671 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5672 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5673 5674 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 5675 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 5676 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 5677 5678 /* Enable "microsoft register bits" R/W. */ 5679 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 5680 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 5681 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 5682 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 5683 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 5684 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 5685 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 5686 ~ACER_CHANSTATUSREGS_RO); 5687 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 5688 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 5689 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 5690 /* Don't use cr, re-read the real register content instead */ 5691 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5692 PCI_CLASS_REG)); 5693 5694 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5695 5696 /* From linux: enable "Cable Detection" */ 5697 if (rev >= 0xC2) 5698 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 5699 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 5700 | ACER_0x4B_CDETECT); 5701 5702 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5703 cp = &sc->pciide_channels[channel]; 5704 if (pciide_chansetup(sc, channel, interface) == 0) 5705 continue; 5706 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 5707 printf("%s: %s ignored (disabled)\n", 5708 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5709 continue; 5710 } 5711 pciide_map_compat_intr(pa, cp, channel, interface); 5712 if (cp->hw_ok == 0) 5713 continue; 5714 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5715 (rev >= 0xC2) ? 
pciide_pci_intr : acer_pci_intr); 5716 if (cp->hw_ok == 0) { 5717 pciide_unmap_compat_intr(pa, cp, channel, interface); 5718 continue; 5719 } 5720 if (pciide_chan_candisable(cp)) { 5721 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 5722 pci_conf_write(sc->sc_pc, sc->sc_tag, 5723 PCI_CLASS_REG, cr); 5724 } 5725 if (cp->hw_ok == 0) { 5726 pciide_unmap_compat_intr(pa, cp, channel, interface); 5727 continue; 5728 } 5729 acer_setup_channel(&cp->wdc_channel); 5730 } 5731 } 5732 5733 void 5734 acer_setup_channel(struct channel_softc *chp) 5735 { 5736 struct ata_drive_datas *drvp; 5737 int drive; 5738 u_int32_t acer_fifo_udma; 5739 u_int32_t idedma_ctl; 5740 struct pciide_channel *cp = (struct pciide_channel *)chp; 5741 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5742 5743 idedma_ctl = 0; 5744 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 5745 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 5746 acer_fifo_udma), DEBUG_PROBE); 5747 /* setup DMA if needed */ 5748 pciide_channel_dma_setup(cp); 5749 5750 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 5751 DRIVE_UDMA) { /* check 80 pins cable */ 5752 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 5753 ACER_0x4A_80PIN(chp->channel)) { 5754 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 5755 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 5756 DEBUG_PROBE); 5757 if (chp->ch_drive[0].UDMA_mode > 2) 5758 chp->ch_drive[0].UDMA_mode = 2; 5759 if (chp->ch_drive[1].UDMA_mode > 2) 5760 chp->ch_drive[1].UDMA_mode = 2; 5761 } 5762 } 5763 5764 for (drive = 0; drive < 2; drive++) { 5765 drvp = &chp->ch_drive[drive]; 5766 /* If no drive, skip */ 5767 if ((drvp->drive_flags & DRIVE) == 0) 5768 continue; 5769 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 5770 "channel %d drive %d 0x%x\n", chp->channel, drive, 5771 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5772 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 5773 /* clear FIFO/DMA mode */ 5774 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 5775 ACER_UDMA_EN(chp->channel, drive) | 5776 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 5777 5778 /* add timing values, setup DMA if needed */ 5779 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5780 (drvp->drive_flags & DRIVE_UDMA) == 0) { 5781 acer_fifo_udma |= 5782 ACER_FTH_OPL(chp->channel, drive, 0x1); 5783 goto pio; 5784 } 5785 5786 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 5787 if (drvp->drive_flags & DRIVE_UDMA) { 5788 /* use Ultra/DMA */ 5789 drvp->drive_flags &= ~DRIVE_DMA; 5790 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 5791 acer_fifo_udma |= 5792 ACER_UDMA_TIM(chp->channel, drive, 5793 acer_udma[drvp->UDMA_mode]); 5794 /* XXX disable if one drive < UDMA3 ? */ 5795 if (drvp->UDMA_mode >= 3) { 5796 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5797 ACER_0x4B, 5798 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5799 ACER_0x4B) | ACER_0x4B_UDMA66); 5800 } 5801 } else { 5802 /* 5803 * use Multiword DMA 5804 * Timings will be used for both PIO and DMA, 5805 * so adjust DMA mode if needed 5806 */ 5807 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5808 drvp->PIO_mode = drvp->DMA_mode + 2; 5809 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5810 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5811 drvp->PIO_mode - 2 : 0; 5812 if (drvp->DMA_mode == 0) 5813 drvp->PIO_mode = 0; 5814 } 5815 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5816 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 5817 ACER_IDETIM(chp->channel, drive), 5818 acer_pio[drvp->PIO_mode]); 5819 } 5820 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 5821 acer_fifo_udma), DEBUG_PROBE); 5822 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 5823 if (idedma_ctl != 0) { 5824 /* Add software bits in status register */ 5825 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5826 IDEDMA_CTL(chp->channel), idedma_ctl); 5827 } 5828 pciide_print_modes(cp); 5829 } 5830 5831 int 5832 acer_pci_intr(void *arg) 5833 { 5834 struct pciide_softc *sc = arg; 5835 struct pciide_channel *cp; 5836 struct channel_softc *wdc_cp; 5837 int i, rv, crv; 5838 u_int32_t chids; 5839 5840 rv = 0; 5841 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 5842 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5843 cp = &sc->pciide_channels[i]; 5844 wdc_cp = &cp->wdc_channel; 5845 /* If a compat channel skip. */ 5846 if (cp->compat) 5847 continue; 5848 if (chids & ACER_CHIDS_INT(i)) { 5849 crv = wdcintr(wdc_cp); 5850 if (crv == 0) 5851 printf("%s:%d: bogus intr\n", 5852 sc->sc_wdcdev.sc_dev.dv_xname, i); 5853 else 5854 rv = 1; 5855 } 5856 } 5857 return (rv); 5858 } 5859 5860 int 5861 acer_dma_init(void *v, int channel, int drive, void *databuf, 5862 size_t datalen, int flags) 5863 { 5864 /* Use PIO for LBA48 transfers. */ 5865 if (flags & WDC_DMA_LBA48) 5866 return (EINVAL); 5867 5868 return (pciide_dma_init(v, channel, drive, databuf, datalen, flags)); 5869 } 5870 5871 void 5872 hpt_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5873 { 5874 struct pciide_channel *cp; 5875 int i, compatchan, revision; 5876 pcireg_t interface; 5877 bus_size_t cmdsize, ctlsize; 5878 5879 revision = sc->sc_rev; 5880 5881 /* 5882 * when the chip is in native mode it identifies itself as a 5883 * 'misc mass storage'. Fake interface in this case. 5884 */ 5885 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 5886 interface = PCI_INTERFACE(pa->pa_class); 5887 } else { 5888 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 5889 PCIIDE_INTERFACE_PCI(0); 5890 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5891 (revision == HPT370_REV || revision == HPT370A_REV || 5892 revision == HPT372_REV)) || 5893 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5894 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5895 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5896 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5897 interface |= PCIIDE_INTERFACE_PCI(1); 5898 } 5899 5900 printf(": DMA"); 5901 pciide_mapreg_dma(sc, pa); 5902 printf("\n"); 5903 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5904 WDC_CAPABILITY_MODE; 5905 if (sc->sc_dma_ok) { 5906 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5907 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5908 sc->sc_wdcdev.irqack = pciide_irqack; 5909 } 5910 sc->sc_wdcdev.PIO_cap = 4; 5911 sc->sc_wdcdev.DMA_cap = 2; 5912 5913 sc->sc_wdcdev.set_modes = hpt_setup_channel; 5914 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5915 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5916 revision == HPT366_REV) { 5917 sc->sc_wdcdev.UDMA_cap = 4; 5918 /* 5919 * The 366 has 2 PCI IDE functions, one for primary and one 5920 * for secondary. 
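 * Each function therefore attaches as its own single-channel pciide
 * instance (nchannels is set to 1 below).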
So we need to call pciide_mapregs_compat() 5921 * with the real channel 5922 */ 5923 if (pa->pa_function == 0) { 5924 compatchan = 0; 5925 } else if (pa->pa_function == 1) { 5926 compatchan = 1; 5927 } else { 5928 printf("%s: unexpected PCI function %d\n", 5929 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 5930 return; 5931 } 5932 sc->sc_wdcdev.nchannels = 1; 5933 } else { 5934 sc->sc_wdcdev.nchannels = 2; 5935 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5936 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5937 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5938 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5939 sc->sc_wdcdev.UDMA_cap = 6; 5940 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 5941 if (revision == HPT372_REV) 5942 sc->sc_wdcdev.UDMA_cap = 6; 5943 else 5944 sc->sc_wdcdev.UDMA_cap = 5; 5945 } 5946 } 5947 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5948 cp = &sc->pciide_channels[i]; 5949 if (sc->sc_wdcdev.nchannels > 1) { 5950 compatchan = i; 5951 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 5952 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 5953 printf("%s: %s ignored (disabled)\n", 5954 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5955 continue; 5956 } 5957 } 5958 if (pciide_chansetup(sc, i, interface) == 0) 5959 continue; 5960 if (interface & PCIIDE_INTERFACE_PCI(i)) { 5961 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 5962 &ctlsize, hpt_pci_intr); 5963 } else { 5964 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 5965 &cmdsize, &ctlsize); 5966 } 5967 if (cp->hw_ok == 0) 5968 return; 5969 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 5970 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 5971 wdcattach(&cp->wdc_channel); 5972 hpt_setup_channel(&cp->wdc_channel); 5973 } 5974 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5975 (revision == HPT370_REV || revision == HPT370A_REV || 5976 revision == HPT372_REV)) || 5977 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5978 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5979 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5980 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 5981 /* 5982 * Turn off fast interrupts 5983 */ 5984 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0), 5985 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) & 5986 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 5987 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1), 5988 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) & 5989 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 5990 5991 /* 5992 * HPT370 and highter has a bit to disable interrupts, 5993 * make sure to clear it 5994 */ 5995 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 5996 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 5997 ~HPT_CSEL_IRQDIS); 5998 } 5999 /* set clocks, etc (mandatory on 372/4, optional otherwise) */ 6000 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 6001 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 6002 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 6003 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 || 6004 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 6005 revision == HPT372_REV)) 6006 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 6007 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 6008 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 6009 6010 return; 6011 } 6012 6013 void 6014 hpt_setup_channel(struct channel_softc *chp) 6015 { 6016 struct ata_drive_datas *drvp; 6017 int drive; 
6018 int cable; 6019 u_int32_t before, after; 6020 u_int32_t idedma_ctl; 6021 struct pciide_channel *cp = (struct pciide_channel *)chp; 6022 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6023 int revision = sc->sc_rev; 6024 u_int32_t *tim_pio, *tim_dma, *tim_udma; 6025 6026 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 6027 6028 /* setup DMA if needed */ 6029 pciide_channel_dma_setup(cp); 6030 6031 idedma_ctl = 0; 6032 6033 switch (sc->sc_pp->ide_product) { 6034 case PCI_PRODUCT_TRIONES_HPT366: 6035 if (revision == HPT370_REV || 6036 revision == HPT370A_REV) { 6037 tim_pio = hpt370_pio; 6038 tim_dma = hpt370_dma; 6039 tim_udma = hpt370_udma; 6040 } else if (revision == HPT372_REV) { 6041 tim_pio = hpt372_pio; 6042 tim_dma = hpt372_dma; 6043 tim_udma = hpt372_udma; 6044 } else { 6045 tim_pio = hpt366_pio; 6046 tim_dma = hpt366_dma; 6047 tim_udma = hpt366_udma; 6048 } 6049 break; 6050 case PCI_PRODUCT_TRIONES_HPT372A: 6051 case PCI_PRODUCT_TRIONES_HPT302: 6052 case PCI_PRODUCT_TRIONES_HPT371: 6053 tim_pio = hpt372_pio; 6054 tim_dma = hpt372_dma; 6055 tim_udma = hpt372_udma; 6056 break; 6057 case PCI_PRODUCT_TRIONES_HPT374: 6058 tim_pio = hpt374_pio; 6059 tim_dma = hpt374_dma; 6060 tim_udma = hpt374_udma; 6061 break; 6062 default: 6063 printf("%s: no known timing values\n", 6064 sc->sc_wdcdev.sc_dev.dv_xname); 6065 goto end; 6066 } 6067 6068 /* Per drive settings */ 6069 for (drive = 0; drive < 2; drive++) { 6070 drvp = &chp->ch_drive[drive]; 6071 /* If no drive, skip */ 6072 if ((drvp->drive_flags & DRIVE) == 0) 6073 continue; 6074 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 6075 HPT_IDETIM(chp->channel, drive)); 6076 6077 /* add timing values, setup DMA if needed */ 6078 if (drvp->drive_flags & DRIVE_UDMA) { 6079 /* use Ultra/DMA */ 6080 drvp->drive_flags &= ~DRIVE_DMA; 6081 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 6082 drvp->UDMA_mode > 2) { 6083 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6084 "cable not detected\n", drvp->drive_name, 6085 sc->sc_wdcdev.sc_dev.dv_xname, 6086 chp->channel, drive), DEBUG_PROBE); 6087 drvp->UDMA_mode = 2; 6088 } 6089 after = tim_udma[drvp->UDMA_mode]; 6090 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6091 } else if (drvp->drive_flags & DRIVE_DMA) { 6092 /* 6093 * use Multiword DMA. 
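 * Multiword DMA mode N is assumed to need roughly the same cycle
 * time as PIO mode N + 2, which is where the "+ 2" below comes from.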
6094 * Timings will be used for both PIO and DMA, so adjust 6095 * DMA mode if needed 6096 */ 6097 if (drvp->PIO_mode >= 3 && 6098 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 6099 drvp->DMA_mode = drvp->PIO_mode - 2; 6100 } 6101 after = tim_dma[drvp->DMA_mode]; 6102 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6103 } else { 6104 /* PIO only */ 6105 after = tim_pio[drvp->PIO_mode]; 6106 } 6107 pci_conf_write(sc->sc_pc, sc->sc_tag, 6108 HPT_IDETIM(chp->channel, drive), after); 6109 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 6110 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 6111 after, before), DEBUG_PROBE); 6112 } 6113 end: 6114 if (idedma_ctl != 0) { 6115 /* Add software bits in status register */ 6116 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6117 IDEDMA_CTL(chp->channel), idedma_ctl); 6118 } 6119 pciide_print_modes(cp); 6120 } 6121 6122 int 6123 hpt_pci_intr(void *arg) 6124 { 6125 struct pciide_softc *sc = arg; 6126 struct pciide_channel *cp; 6127 struct channel_softc *wdc_cp; 6128 int rv = 0; 6129 int dmastat, i, crv; 6130 6131 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6132 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6133 IDEDMA_CTL(i)); 6134 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 6135 IDEDMA_CTL_INTR) 6136 continue; 6137 cp = &sc->pciide_channels[i]; 6138 wdc_cp = &cp->wdc_channel; 6139 crv = wdcintr(wdc_cp); 6140 if (crv == 0) { 6141 printf("%s:%d: bogus intr\n", 6142 sc->sc_wdcdev.sc_dev.dv_xname, i); 6143 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6144 IDEDMA_CTL(i), dmastat); 6145 } else 6146 rv = 1; 6147 } 6148 return (rv); 6149 } 6150 6151 /* Macros to test product */ 6152 #define PDC_IS_262(sc) \ 6153 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 6154 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6155 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 6156 #define PDC_IS_265(sc) \ 6157 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6158 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 6159 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6160 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6161 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6162 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6163 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6164 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6165 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6166 #define PDC_IS_268(sc) \ 6167 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6168 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6169 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6170 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6171 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6172 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6173 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6174 #define PDC_IS_269(sc) \ 6175 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6176 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6177 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6178 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6179 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6180 6181 u_int8_t 6182 pdc268_config_read(struct channel_softc *chp, int index) 6183 { 6184 struct pciide_channel *cp = (struct pciide_channel *)chp; 6185 struct 
pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6186 int channel = chp->channel; 6187 6188 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6189 PDC268_INDEX(channel), index); 6190 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6191 PDC268_DATA(channel))); 6192 } 6193 6194 void 6195 pdc202xx_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6196 { 6197 struct pciide_channel *cp; 6198 int channel; 6199 pcireg_t interface, st, mode; 6200 bus_size_t cmdsize, ctlsize; 6201 6202 if (!PDC_IS_268(sc)) { 6203 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6204 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 6205 st), DEBUG_PROBE); 6206 } 6207 6208 /* turn off RAID mode */ 6209 if (!PDC_IS_268(sc)) 6210 st &= ~PDC2xx_STATE_IDERAID; 6211 6212 /* 6213 * can't rely on the PCI_CLASS_REG content if the chip was in raid 6214 * mode. We have to fake interface 6215 */ 6216 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 6217 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 6218 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 6219 6220 printf(": DMA"); 6221 pciide_mapreg_dma(sc, pa); 6222 6223 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6224 WDC_CAPABILITY_MODE; 6225 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 6226 PDC_IS_262(sc)) 6227 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 6228 if (sc->sc_dma_ok) { 6229 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6230 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6231 sc->sc_wdcdev.irqack = pciide_irqack; 6232 } 6233 sc->sc_wdcdev.PIO_cap = 4; 6234 sc->sc_wdcdev.DMA_cap = 2; 6235 if (PDC_IS_269(sc)) 6236 sc->sc_wdcdev.UDMA_cap = 6; 6237 else if (PDC_IS_265(sc)) 6238 sc->sc_wdcdev.UDMA_cap = 5; 6239 else if (PDC_IS_262(sc)) 6240 sc->sc_wdcdev.UDMA_cap = 4; 6241 else 6242 sc->sc_wdcdev.UDMA_cap = 2; 6243 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
6244 pdc20268_setup_channel : pdc202xx_setup_channel; 6245 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6246 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 6247 6248 if (PDC_IS_262(sc)) { 6249 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 6250 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 6251 } 6252 6253 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 6254 if (!PDC_IS_268(sc)) { 6255 /* setup failsafe defaults */ 6256 mode = 0; 6257 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 6258 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 6259 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 6260 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 6261 for (channel = 0; 6262 channel < sc->sc_wdcdev.nchannels; 6263 channel++) { 6264 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6265 "drive 0 initial timings 0x%x, now 0x%x\n", 6266 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6267 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 6268 DEBUG_PROBE); 6269 pci_conf_write(sc->sc_pc, sc->sc_tag, 6270 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 6271 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6272 "drive 1 initial timings 0x%x, now 0x%x\n", 6273 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6274 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 6275 pci_conf_write(sc->sc_pc, sc->sc_tag, 6276 PDC2xx_TIM(channel, 1), mode); 6277 } 6278 6279 mode = PDC2xx_SCR_DMA; 6280 if (PDC_IS_262(sc)) { 6281 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 6282 } else { 6283 /* the BIOS set it up this way */ 6284 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 6285 } 6286 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 6287 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 6288 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 6289 "now 0x%x\n", 6290 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6291 PDC2xx_SCR), 6292 mode), DEBUG_PROBE); 6293 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6294 PDC2xx_SCR, mode); 6295 6296 /* controller initial state register is OK even without BIOS */ 6297 /* Set DMA mode to IDE DMA compatibility */ 6298 mode = 6299 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 6300 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 6301 DEBUG_PROBE); 6302 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 6303 mode | 0x1); 6304 mode = 6305 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 6306 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 6307 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 6308 mode | 0x1); 6309 } 6310 6311 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6312 cp = &sc->pciide_channels[channel]; 6313 if (pciide_chansetup(sc, channel, interface) == 0) 6314 continue; 6315 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 6316 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 6317 printf("%s: %s ignored (disabled)\n", 6318 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6319 continue; 6320 } 6321 pciide_map_compat_intr(pa, cp, channel, interface); 6322 if (cp->hw_ok == 0) 6323 continue; 6324 if (PDC_IS_265(sc)) 6325 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6326 pdc20265_pci_intr); 6327 else 6328 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6329 pdc202xx_pci_intr); 6330 if (cp->hw_ok == 0) { 6331 pciide_unmap_compat_intr(pa, cp, channel, interface); 6332 continue; 6333 } 6334 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 6335 st &= ~(PDC_IS_262(sc) ? 
6336 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 6337 pciide_unmap_compat_intr(pa, cp, channel, interface); 6338 } 6339 if (PDC_IS_268(sc)) 6340 pdc20268_setup_channel(&cp->wdc_channel); 6341 else 6342 pdc202xx_setup_channel(&cp->wdc_channel); 6343 } 6344 if (!PDC_IS_268(sc)) { 6345 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 6346 "0x%x\n", st), DEBUG_PROBE); 6347 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 6348 } 6349 return; 6350 } 6351 6352 void 6353 pdc202xx_setup_channel(struct channel_softc *chp) 6354 { 6355 struct ata_drive_datas *drvp; 6356 int drive; 6357 pcireg_t mode, st; 6358 u_int32_t idedma_ctl, scr, atapi; 6359 struct pciide_channel *cp = (struct pciide_channel *)chp; 6360 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6361 int channel = chp->channel; 6362 6363 /* setup DMA if needed */ 6364 pciide_channel_dma_setup(cp); 6365 6366 idedma_ctl = 0; 6367 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 6368 sc->sc_wdcdev.sc_dev.dv_xname, 6369 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 6370 DEBUG_PROBE); 6371 6372 /* Per channel settings */ 6373 if (PDC_IS_262(sc)) { 6374 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6375 PDC262_U66); 6376 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6377 /* Check cable */ 6378 if ((st & PDC262_STATE_80P(channel)) != 0 && 6379 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6380 chp->ch_drive[0].UDMA_mode > 2) || 6381 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6382 chp->ch_drive[1].UDMA_mode > 2))) { 6383 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 6384 sc->sc_wdcdev.sc_dev.dv_xname, channel), 6385 DEBUG_PROBE); 6386 if (chp->ch_drive[0].UDMA_mode > 2) 6387 chp->ch_drive[0].UDMA_mode = 2; 6388 if (chp->ch_drive[1].UDMA_mode > 2) 6389 chp->ch_drive[1].UDMA_mode = 2; 6390 } 6391 /* Trim UDMA mode */ 6392 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6393 chp->ch_drive[0].UDMA_mode <= 2) || 6394 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6395 chp->ch_drive[1].UDMA_mode <= 2)) { 6396 if (chp->ch_drive[0].UDMA_mode > 2) 6397 chp->ch_drive[0].UDMA_mode = 2; 6398 if (chp->ch_drive[1].UDMA_mode > 2) 6399 chp->ch_drive[1].UDMA_mode = 2; 6400 } 6401 /* Set U66 if needed */ 6402 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6403 chp->ch_drive[0].UDMA_mode > 2) || 6404 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6405 chp->ch_drive[1].UDMA_mode > 2)) 6406 scr |= PDC262_U66_EN(channel); 6407 else 6408 scr &= ~PDC262_U66_EN(channel); 6409 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6410 PDC262_U66, scr); 6411 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 6412 sc->sc_wdcdev.sc_dev.dv_xname, channel, 6413 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6414 PDC262_ATAPI(channel))), DEBUG_PROBE); 6415 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 6416 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 6417 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6418 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6419 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 6420 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6421 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6422 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 6423 atapi = 0; 6424 else 6425 atapi = PDC262_ATAPI_UDMA; 6426 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6427 PDC262_ATAPI(channel), atapi); 6428 } 6429 } 6430 for (drive = 0; drive < 2; drive++) { 6431 drvp = &chp->ch_drive[drive]; 6432 /* If no drive, skip */ 6433 if ((drvp->drive_flags & 
DRIVE) == 0) 6434 continue; 6435 mode = 0; 6436 if (drvp->drive_flags & DRIVE_UDMA) { 6437 /* use Ultra/DMA */ 6438 drvp->drive_flags &= ~DRIVE_DMA; 6439 mode = PDC2xx_TIM_SET_MB(mode, 6440 pdc2xx_udma_mb[drvp->UDMA_mode]); 6441 mode = PDC2xx_TIM_SET_MC(mode, 6442 pdc2xx_udma_mc[drvp->UDMA_mode]); 6443 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6444 } else if (drvp->drive_flags & DRIVE_DMA) { 6445 mode = PDC2xx_TIM_SET_MB(mode, 6446 pdc2xx_dma_mb[drvp->DMA_mode]); 6447 mode = PDC2xx_TIM_SET_MC(mode, 6448 pdc2xx_dma_mc[drvp->DMA_mode]); 6449 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6450 } else { 6451 mode = PDC2xx_TIM_SET_MB(mode, 6452 pdc2xx_dma_mb[0]); 6453 mode = PDC2xx_TIM_SET_MC(mode, 6454 pdc2xx_dma_mc[0]); 6455 } 6456 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 6457 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 6458 if (drvp->drive_flags & DRIVE_ATA) 6459 mode |= PDC2xx_TIM_PRE; 6460 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 6461 if (drvp->PIO_mode >= 3) { 6462 mode |= PDC2xx_TIM_IORDY; 6463 if (drive == 0) 6464 mode |= PDC2xx_TIM_IORDYp; 6465 } 6466 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 6467 "timings 0x%x\n", 6468 sc->sc_wdcdev.sc_dev.dv_xname, 6469 chp->channel, drive, mode), DEBUG_PROBE); 6470 pci_conf_write(sc->sc_pc, sc->sc_tag, 6471 PDC2xx_TIM(chp->channel, drive), mode); 6472 } 6473 if (idedma_ctl != 0) { 6474 /* Add software bits in status register */ 6475 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6476 IDEDMA_CTL(channel), idedma_ctl); 6477 } 6478 pciide_print_modes(cp); 6479 } 6480 6481 void 6482 pdc20268_setup_channel(struct channel_softc *chp) 6483 { 6484 struct ata_drive_datas *drvp; 6485 int drive, cable; 6486 u_int32_t idedma_ctl; 6487 struct pciide_channel *cp = (struct pciide_channel *)chp; 6488 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6489 int channel = chp->channel; 6490 6491 /* check 80 pins cable */ 6492 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE; 6493 6494 /* setup DMA if needed */ 6495 pciide_channel_dma_setup(cp); 6496 6497 idedma_ctl = 0; 6498 6499 for (drive = 0; drive < 2; drive++) { 6500 drvp = &chp->ch_drive[drive]; 6501 /* If no drive, skip */ 6502 if ((drvp->drive_flags & DRIVE) == 0) 6503 continue; 6504 if (drvp->drive_flags & DRIVE_UDMA) { 6505 /* use Ultra/DMA */ 6506 drvp->drive_flags &= ~DRIVE_DMA; 6507 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6508 if (cable && drvp->UDMA_mode > 2) { 6509 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6510 "cable not detected\n", drvp->drive_name, 6511 sc->sc_wdcdev.sc_dev.dv_xname, 6512 channel, drive), DEBUG_PROBE); 6513 drvp->UDMA_mode = 2; 6514 } 6515 } else if (drvp->drive_flags & DRIVE_DMA) { 6516 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6517 } 6518 } 6519 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */ 6520 if (idedma_ctl != 0) { 6521 /* Add software bits in status register */ 6522 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6523 IDEDMA_CTL(channel), idedma_ctl); 6524 } 6525 pciide_print_modes(cp); 6526 } 6527 6528 int 6529 pdc202xx_pci_intr(void *arg) 6530 { 6531 struct pciide_softc *sc = arg; 6532 struct pciide_channel *cp; 6533 struct channel_softc *wdc_cp; 6534 int i, rv, crv; 6535 u_int32_t scr; 6536 6537 rv = 0; 6538 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 6539 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6540 cp = &sc->pciide_channels[i]; 6541 wdc_cp = &cp->wdc_channel; 6542 /* If a compat channel skip. 
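 * Compatibility channels get their interrupts through the legacy
 * vectors established by pciide_map_compat_intr(), not through this
 * native-PCI handler.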
*/ 6543 if (cp->compat) 6544 continue; 6545 if (scr & PDC2xx_SCR_INT(i)) { 6546 crv = wdcintr(wdc_cp); 6547 if (crv == 0) 6548 printf("%s:%d: bogus intr (reg 0x%x)\n", 6549 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 6550 else 6551 rv = 1; 6552 } 6553 } 6554 return (rv); 6555 } 6556 6557 int 6558 pdc20265_pci_intr(void *arg) 6559 { 6560 struct pciide_softc *sc = arg; 6561 struct pciide_channel *cp; 6562 struct channel_softc *wdc_cp; 6563 int i, rv, crv; 6564 u_int32_t dmastat; 6565 6566 rv = 0; 6567 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6568 cp = &sc->pciide_channels[i]; 6569 wdc_cp = &cp->wdc_channel; 6570 /* If a compat channel skip. */ 6571 if (cp->compat) 6572 continue; 6573 6574 /* 6575 * In case of shared IRQ check that the interrupt 6576 * was actually generated by this channel. 6577 * Only check the channel that is enabled. 6578 */ 6579 if (cp->hw_ok && PDC_IS_268(sc)) { 6580 if ((pdc268_config_read(wdc_cp, 6581 0x0b) & PDC268_INTR) == 0) 6582 continue; 6583 } 6584 6585 /* 6586 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 6587 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 6588 * So use it instead (requires 2 reg reads instead of 1, 6589 * but we can't do it another way). 6590 */ 6591 dmastat = bus_space_read_1(sc->sc_dma_iot, 6592 sc->sc_dma_ioh, IDEDMA_CTL(i)); 6593 if ((dmastat & IDEDMA_CTL_INTR) == 0) 6594 continue; 6595 6596 crv = wdcintr(wdc_cp); 6597 if (crv == 0) 6598 printf("%s:%d: bogus intr\n", 6599 sc->sc_wdcdev.sc_dev.dv_xname, i); 6600 else 6601 rv = 1; 6602 } 6603 return (rv); 6604 } 6605 6606 void 6607 pdc20262_dma_start(void *v, int channel, int drive) 6608 { 6609 struct pciide_softc *sc = v; 6610 struct pciide_dma_maps *dma_maps = 6611 &sc->pciide_channels[channel].dma_maps[drive]; 6612 u_int8_t clock; 6613 u_int32_t count; 6614 6615 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6616 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6617 PDC262_U66); 6618 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6619 PDC262_U66, clock | PDC262_U66_EN(channel)); 6620 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 6621 count |= dma_maps->dma_flags & WDC_DMA_READ ? 
6622 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 6623 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6624 PDC262_ATAPI(channel), count); 6625 } 6626 6627 pciide_dma_start(v, channel, drive); 6628 } 6629 6630 int 6631 pdc20262_dma_finish(void *v, int channel, int drive, int force) 6632 { 6633 struct pciide_softc *sc = v; 6634 struct pciide_dma_maps *dma_maps = 6635 &sc->pciide_channels[channel].dma_maps[drive]; 6636 u_int8_t clock; 6637 6638 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6639 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6640 PDC262_U66); 6641 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6642 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 6643 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6644 PDC262_ATAPI(channel), 0); 6645 } 6646 6647 return (pciide_dma_finish(v, channel, drive, force)); 6648 } 6649 6650 void 6651 pdcsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6652 { 6653 struct pciide_channel *cp; 6654 struct channel_softc *wdc_cp; 6655 struct pciide_pdcsata *ps; 6656 int channel, i; 6657 bus_size_t dmasize; 6658 pci_intr_handle_t intrhandle; 6659 const char *intrstr; 6660 6661 /* Allocate memory for private data */ 6662 sc->sc_cookie = malloc(sizeof(*ps), M_DEVBUF, M_NOWAIT | M_ZERO); 6663 ps = sc->sc_cookie; 6664 6665 /* 6666 * Promise SATA controllers have 3 or 4 channels, 6667 * the usual IDE registers are mapped in I/O space, with offsets. 6668 */ 6669 if (pci_intr_map(pa, &intrhandle) != 0) { 6670 printf(": couldn't map interrupt\n"); 6671 return; 6672 } 6673 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 6674 6675 switch (sc->sc_pp->ide_product) { 6676 case PCI_PRODUCT_PROMISE_PDC20318: 6677 case PCI_PRODUCT_PROMISE_PDC20319: 6678 case PCI_PRODUCT_PROMISE_PDC20371: 6679 case PCI_PRODUCT_PROMISE_PDC20375: 6680 case PCI_PRODUCT_PROMISE_PDC20376: 6681 case PCI_PRODUCT_PROMISE_PDC20377: 6682 case PCI_PRODUCT_PROMISE_PDC20378: 6683 case PCI_PRODUCT_PROMISE_PDC20379: 6684 default: 6685 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6686 intrhandle, IPL_BIO, pdc203xx_pci_intr, sc, 6687 sc->sc_wdcdev.sc_dev.dv_xname); 6688 break; 6689 6690 case PCI_PRODUCT_PROMISE_PDC40518: 6691 case PCI_PRODUCT_PROMISE_PDC40519: 6692 case PCI_PRODUCT_PROMISE_PDC40718: 6693 case PCI_PRODUCT_PROMISE_PDC40719: 6694 case PCI_PRODUCT_PROMISE_PDC40779: 6695 case PCI_PRODUCT_PROMISE_PDC20571: 6696 case PCI_PRODUCT_PROMISE_PDC20575: 6697 case PCI_PRODUCT_PROMISE_PDC20579: 6698 case PCI_PRODUCT_PROMISE_PDC20771: 6699 case PCI_PRODUCT_PROMISE_PDC20775: 6700 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6701 intrhandle, IPL_BIO, pdc205xx_pci_intr, sc, 6702 sc->sc_wdcdev.sc_dev.dv_xname); 6703 break; 6704 } 6705 6706 if (sc->sc_pci_ih == NULL) { 6707 printf(": couldn't establish native-PCI interrupt"); 6708 if (intrstr != NULL) 6709 printf(" at %s", intrstr); 6710 printf("\n"); 6711 return; 6712 } 6713 6714 sc->sc_dma_ok = (pci_mapreg_map(pa, PCIIDE_REG_BUS_MASTER_DMA, 6715 PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_dma_iot, 6716 &sc->sc_dma_ioh, NULL, &dmasize, 0) == 0); 6717 if (!sc->sc_dma_ok) { 6718 printf(": couldn't map bus-master DMA registers\n"); 6719 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6720 return; 6721 } 6722 6723 sc->sc_dmat = pa->pa_dmat; 6724 6725 if (pci_mapreg_map(pa, PDC203xx_BAR_IDEREGS, 6726 PCI_MAPREG_MEM_TYPE_32BIT, 0, &ps->ba5_st, 6727 &ps->ba5_sh, NULL, NULL, 0) != 0) { 6728 printf(": couldn't map IDE registers\n"); 6729 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, dmasize); 6730 pci_intr_disestablish(pa->pa_pc, 
sc->sc_pci_ih); 6731 return; 6732 } 6733 6734 printf(": DMA\n"); 6735 6736 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 6737 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6738 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6739 sc->sc_wdcdev.irqack = pdc203xx_irqack; 6740 sc->sc_wdcdev.PIO_cap = 4; 6741 sc->sc_wdcdev.DMA_cap = 2; 6742 sc->sc_wdcdev.UDMA_cap = 6; 6743 sc->sc_wdcdev.set_modes = pdc203xx_setup_channel; 6744 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6745 6746 switch (sc->sc_pp->ide_product) { 6747 case PCI_PRODUCT_PROMISE_PDC20318: 6748 case PCI_PRODUCT_PROMISE_PDC20319: 6749 case PCI_PRODUCT_PROMISE_PDC20371: 6750 case PCI_PRODUCT_PROMISE_PDC20375: 6751 case PCI_PRODUCT_PROMISE_PDC20376: 6752 case PCI_PRODUCT_PROMISE_PDC20377: 6753 case PCI_PRODUCT_PROMISE_PDC20378: 6754 case PCI_PRODUCT_PROMISE_PDC20379: 6755 default: 6756 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x06c, 0x00ff0033); 6757 sc->sc_wdcdev.nchannels = 6758 (bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x48) & 0x02) ? 6759 PDC203xx_NCHANNELS : 3; 6760 break; 6761 6762 case PCI_PRODUCT_PROMISE_PDC40518: 6763 case PCI_PRODUCT_PROMISE_PDC40519: 6764 case PCI_PRODUCT_PROMISE_PDC40718: 6765 case PCI_PRODUCT_PROMISE_PDC40719: 6766 case PCI_PRODUCT_PROMISE_PDC40779: 6767 case PCI_PRODUCT_PROMISE_PDC20571: 6768 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6769 sc->sc_wdcdev.nchannels = PDC40718_NCHANNELS; 6770 6771 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6772 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6773 6774 break; 6775 case PCI_PRODUCT_PROMISE_PDC20575: 6776 case PCI_PRODUCT_PROMISE_PDC20579: 6777 case PCI_PRODUCT_PROMISE_PDC20771: 6778 case PCI_PRODUCT_PROMISE_PDC20775: 6779 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6780 sc->sc_wdcdev.nchannels = PDC20575_NCHANNELS; 6781 6782 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6783 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6784 6785 break; 6786 } 6787 6788 sc->sc_wdcdev.dma_arg = sc; 6789 sc->sc_wdcdev.dma_init = pciide_dma_init; 6790 sc->sc_wdcdev.dma_start = pdc203xx_dma_start; 6791 sc->sc_wdcdev.dma_finish = pdc203xx_dma_finish; 6792 6793 for (channel = 0; channel < sc->sc_wdcdev.nchannels; 6794 channel++) { 6795 cp = &sc->pciide_channels[channel]; 6796 sc->wdc_chanarray[channel] = &cp->wdc_channel; 6797 6798 cp->ih = sc->sc_pci_ih; 6799 cp->name = NULL; 6800 cp->wdc_channel.channel = channel; 6801 cp->wdc_channel.wdc = &sc->sc_wdcdev; 6802 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 6803 if (cp->wdc_channel.ch_queue == NULL) { 6804 printf("%s: channel %d: " 6805 "cannot allocate channel queue\n", 6806 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6807 continue; 6808 } 6809 wdc_cp = &cp->wdc_channel; 6810 6811 ps->regs[channel].ctl_iot = ps->ba5_st; 6812 ps->regs[channel].cmd_iot = ps->ba5_st; 6813 6814 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6815 0x0238 + (channel << 7), 1, 6816 &ps->regs[channel].ctl_ioh) != 0) { 6817 printf("%s: couldn't map channel %d ctl regs\n", 6818 sc->sc_wdcdev.sc_dev.dv_xname, 6819 channel); 6820 continue; 6821 } 6822 for (i = 0; i < WDC_NREG; i++) { 6823 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6824 0x0200 + (i << 2) + (channel << 7), i == 0 ? 
4 : 1, 6825 &ps->regs[channel].cmd_iohs[i]) != 0) { 6826 printf("%s: couldn't map channel %d cmd " 6827 "regs\n", 6828 sc->sc_wdcdev.sc_dev.dv_xname, 6829 channel); 6830 continue; 6831 } 6832 } 6833 ps->regs[channel].cmd_iohs[wdr_status & _WDC_REGMASK] = 6834 ps->regs[channel].cmd_iohs[wdr_command & _WDC_REGMASK]; 6835 ps->regs[channel].cmd_iohs[wdr_features & _WDC_REGMASK] = 6836 ps->regs[channel].cmd_iohs[wdr_error & _WDC_REGMASK]; 6837 wdc_cp->data32iot = wdc_cp->cmd_iot = 6838 ps->regs[channel].cmd_iot; 6839 wdc_cp->data32ioh = wdc_cp->cmd_ioh = 6840 ps->regs[channel].cmd_iohs[0]; 6841 wdc_cp->_vtbl = &wdc_pdc203xx_vtbl; 6842 6843 /* 6844 * Subregion de busmaster registers. They're spread all over 6845 * the controller's register space :(. They are also 4 bytes 6846 * sized, with some specific extentions in the extra bits. 6847 * It also seems that the IDEDMA_CTL register isn't available. 6848 */ 6849 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6850 0x260 + (channel << 7), 1, 6851 &ps->regs[channel].dma_iohs[IDEDMA_CMD(0)]) != 0) { 6852 printf("%s channel %d: can't subregion DMA " 6853 "registers\n", 6854 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6855 continue; 6856 } 6857 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6858 0x244 + (channel << 7), 4, 6859 &ps->regs[channel].dma_iohs[IDEDMA_TBL(0)]) != 0) { 6860 printf("%s channel %d: can't subregion DMA " 6861 "registers\n", 6862 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6863 continue; 6864 } 6865 6866 wdcattach(wdc_cp); 6867 bus_space_write_4(sc->sc_dma_iot, 6868 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6869 (bus_space_read_4(sc->sc_dma_iot, 6870 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6871 0) & ~0x00003f9f) | (channel + 1)); 6872 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 6873 (channel + 1) << 2, 0x00000001); 6874 6875 pdc203xx_setup_channel(&cp->wdc_channel); 6876 } 6877 6878 printf("%s: using %s for native-PCI interrupt\n", 6879 sc->sc_wdcdev.sc_dev.dv_xname, 6880 intrstr ? 
intrstr : "unknown interrupt"); 6881 } 6882 6883 void 6884 pdc203xx_setup_channel(struct channel_softc *chp) 6885 { 6886 struct ata_drive_datas *drvp; 6887 struct pciide_channel *cp = (struct pciide_channel *)chp; 6888 int drive, s; 6889 6890 pciide_channel_dma_setup(cp); 6891 6892 for (drive = 0; drive < 2; drive++) { 6893 drvp = &chp->ch_drive[drive]; 6894 if ((drvp->drive_flags & DRIVE) == 0) 6895 continue; 6896 if (drvp->drive_flags & DRIVE_UDMA) { 6897 s = splbio(); 6898 drvp->drive_flags &= ~DRIVE_DMA; 6899 splx(s); 6900 } 6901 } 6902 pciide_print_modes(cp); 6903 } 6904 6905 int 6906 pdc203xx_pci_intr(void *arg) 6907 { 6908 struct pciide_softc *sc = arg; 6909 struct pciide_channel *cp; 6910 struct channel_softc *wdc_cp; 6911 struct pciide_pdcsata *ps = sc->sc_cookie; 6912 int i, rv, crv; 6913 u_int32_t scr; 6914 6915 rv = 0; 6916 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x00040); 6917 6918 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6919 cp = &sc->pciide_channels[i]; 6920 wdc_cp = &cp->wdc_channel; 6921 if (scr & (1 << (i + 1))) { 6922 crv = wdcintr(wdc_cp); 6923 if (crv == 0) { 6924 printf("%s:%d: bogus intr (reg 0x%x)\n", 6925 sc->sc_wdcdev.sc_dev.dv_xname, 6926 i, scr); 6927 } else 6928 rv = 1; 6929 } 6930 } 6931 6932 return (rv); 6933 } 6934 6935 int 6936 pdc205xx_pci_intr(void *arg) 6937 { 6938 struct pciide_softc *sc = arg; 6939 struct pciide_channel *cp; 6940 struct channel_softc *wdc_cp; 6941 struct pciide_pdcsata *ps = sc->sc_cookie; 6942 int i, rv, crv; 6943 u_int32_t scr, status; 6944 6945 rv = 0; 6946 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x40); 6947 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x40, scr & 0x0000ffff); 6948 6949 status = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x60); 6950 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, status & 0x000000ff); 6951 6952 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6953 cp = &sc->pciide_channels[i]; 6954 wdc_cp = &cp->wdc_channel; 6955 if (scr & (1 << (i + 1))) { 6956 crv = wdcintr(wdc_cp); 6957 if (crv == 0) { 6958 printf("%s:%d: bogus intr (reg 0x%x)\n", 6959 sc->sc_wdcdev.sc_dev.dv_xname, 6960 i, scr); 6961 } else 6962 rv = 1; 6963 } 6964 } 6965 return rv; 6966 } 6967 6968 void 6969 pdc203xx_irqack(struct channel_softc *chp) 6970 { 6971 struct pciide_channel *cp = (struct pciide_channel *)chp; 6972 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6973 struct pciide_pdcsata *ps = sc->sc_cookie; 6974 int chan = chp->channel; 6975 6976 bus_space_write_4(sc->sc_dma_iot, 6977 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, 6978 (bus_space_read_4(sc->sc_dma_iot, 6979 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 6980 0) & ~0x00003f9f) | (chan + 1)); 6981 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 6982 (chan + 1) << 2, 0x00000001); 6983 } 6984 6985 void 6986 pdc203xx_dma_start(void *v, int channel, int drive) 6987 { 6988 struct pciide_softc *sc = v; 6989 struct pciide_channel *cp = &sc->pciide_channels[channel]; 6990 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 6991 struct pciide_pdcsata *ps = sc->sc_cookie; 6992 6993 /* Write table address */ 6994 bus_space_write_4(sc->sc_dma_iot, 6995 ps->regs[channel].dma_iohs[IDEDMA_TBL(0)], 0, 6996 dma_maps->dmamap_table->dm_segs[0].ds_addr); 6997 6998 /* Start DMA engine */ 6999 bus_space_write_4(sc->sc_dma_iot, 7000 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7001 (bus_space_read_4(sc->sc_dma_iot, 7002 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7003 0) & ~0xc0) | ((dma_maps->dma_flags & WDC_DMA_READ) ? 
0x80 : 0xc0)); 7004 } 7005 7006 int 7007 pdc203xx_dma_finish(void *v, int channel, int drive, int force) 7008 { 7009 struct pciide_softc *sc = v; 7010 struct pciide_channel *cp = &sc->pciide_channels[channel]; 7011 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 7012 struct pciide_pdcsata *ps = sc->sc_cookie; 7013 7014 /* Stop DMA channel */ 7015 bus_space_write_4(sc->sc_dma_iot, 7016 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7017 (bus_space_read_4(sc->sc_dma_iot, 7018 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7019 0) & ~0x80)); 7020 7021 /* Unload the map of the data buffer */ 7022 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 7023 dma_maps->dmamap_xfer->dm_mapsize, 7024 (dma_maps->dma_flags & WDC_DMA_READ) ? 7025 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 7026 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 7027 7028 return (0); 7029 } 7030 7031 u_int8_t 7032 pdc203xx_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7033 { 7034 struct pciide_channel *cp = (struct pciide_channel *)chp; 7035 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7036 struct pciide_pdcsata *ps = sc->sc_cookie; 7037 u_int8_t val; 7038 7039 if (reg & _WDC_AUX) { 7040 return (bus_space_read_1(ps->regs[chp->channel].ctl_iot, 7041 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 7042 } else { 7043 val = bus_space_read_1(ps->regs[chp->channel].cmd_iot, 7044 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0); 7045 return (val); 7046 } 7047 } 7048 7049 void 7050 pdc203xx_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7051 { 7052 struct pciide_channel *cp = (struct pciide_channel *)chp; 7053 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7054 struct pciide_pdcsata *ps = sc->sc_cookie; 7055 7056 if (reg & _WDC_AUX) 7057 bus_space_write_1(ps->regs[chp->channel].ctl_iot, 7058 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 7059 else 7060 bus_space_write_1(ps->regs[chp->channel].cmd_iot, 7061 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 7062 0, val); 7063 } 7064 7065 void 7066 pdc205xx_do_reset(struct channel_softc *chp) 7067 { 7068 struct pciide_channel *cp = (struct pciide_channel *)chp; 7069 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7070 struct pciide_pdcsata *ps = sc->sc_cookie; 7071 u_int32_t scontrol; 7072 7073 wdc_do_reset(chp); 7074 7075 /* reset SATA */ 7076 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 7077 SCONTROL_WRITE(ps, chp->channel, scontrol); 7078 delay(50*1000); 7079 7080 scontrol &= ~SControl_DET_INIT; 7081 SCONTROL_WRITE(ps, chp->channel, scontrol); 7082 delay(50*1000); 7083 } 7084 7085 void 7086 pdc205xx_drv_probe(struct channel_softc *chp) 7087 { 7088 struct pciide_channel *cp = (struct pciide_channel *)chp; 7089 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7090 struct pciide_pdcsata *ps = sc->sc_cookie; 7091 bus_space_handle_t *iohs; 7092 u_int32_t scontrol, sstatus; 7093 u_int16_t scnt, sn, cl, ch; 7094 int s; 7095 7096 SCONTROL_WRITE(ps, chp->channel, 0); 7097 delay(50*1000); 7098 7099 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 7100 SCONTROL_WRITE(ps,chp->channel,scontrol); 7101 delay(50*1000); 7102 7103 scontrol &= ~SControl_DET_INIT; 7104 SCONTROL_WRITE(ps,chp->channel,scontrol); 7105 delay(50*1000); 7106 7107 sstatus = SSTATUS_READ(ps,chp->channel); 7108 7109 switch (sstatus & SStatus_DET_mask) { 7110 case SStatus_DET_NODEV: 7111 /* No Device; be silent. 
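 * DET is the device-detection field of SStatus; the remaining cases
 * handle a device that is present but not communicating, an offline
 * PHY, and a device with an established link.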
*/ 7112 break; 7113 7114 case SStatus_DET_DEV_NE: 7115 printf("%s: port %d: device connected, but " 7116 "communication not established\n", 7117 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7118 break; 7119 7120 case SStatus_DET_OFFLINE: 7121 printf("%s: port %d: PHY offline\n", 7122 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7123 break; 7124 7125 case SStatus_DET_DEV: 7126 iohs = ps->regs[chp->channel].cmd_iohs; 7127 bus_space_write_1(chp->cmd_iot, iohs[wdr_sdh], 0, 7128 WDSD_IBM); 7129 delay(10); /* 400ns delay */ 7130 scnt = bus_space_read_2(chp->cmd_iot, iohs[wdr_seccnt], 0); 7131 sn = bus_space_read_2(chp->cmd_iot, iohs[wdr_sector], 0); 7132 cl = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_lo], 0); 7133 ch = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_hi], 0); 7134 #if 0 7135 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7136 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7137 scnt, sn, cl, ch); 7138 #endif 7139 /* 7140 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7141 * cases we get wrong values here, so ignore it. 7142 */ 7143 s = splbio(); 7144 if (cl == 0x14 && ch == 0xeb) 7145 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7146 else 7147 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7148 splx(s); 7149 #if 0 7150 printf("%s: port %d: device present", 7151 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7152 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7153 case 1: 7154 printf(", speed: 1.5Gb/s"); 7155 break; 7156 case 2: 7157 printf(", speed: 3.0Gb/s"); 7158 break; 7159 } 7160 printf("\n"); 7161 #endif 7162 break; 7163 7164 default: 7165 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7166 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7167 } 7168 } 7169 7170 #ifdef notyet 7171 /* 7172 * Inline functions for accessing the timing registers of the 7173 * OPTi controller. 7174 * 7175 * These *MUST* disable interrupts as they need atomic access to 7176 * certain magic registers. Failure to adhere to this *will* 7177 * break things in subtle ways if the wdc registers are accessed 7178 * by an interrupt routine while this magic sequence is executing. 
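 * This is why opti_read_config() and opti_write_config() below
 * bracket the whole unlock/access/restore sequence with splhigh()
 * and splx().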
7179 */ 7180 static __inline__ u_int8_t 7181 opti_read_config(struct channel_softc *chp, int reg) 7182 { 7183 u_int8_t rv; 7184 int s = splhigh(); 7185 7186 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7187 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7188 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7189 7190 /* Followed by an 8-bit write of 0x3 to register #2 */ 7191 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7192 7193 /* Now we can read the required register */ 7194 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 7195 7196 /* Restore the real registers */ 7197 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7198 7199 splx(s); 7200 7201 return (rv); 7202 } 7203 7204 static __inline__ void 7205 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 7206 { 7207 int s = splhigh(); 7208 7209 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7210 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7211 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7212 7213 /* Followed by an 8-bit write of 0x3 to register #2 */ 7214 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7215 7216 /* Now we can write the required register */ 7217 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 7218 7219 /* Restore the real registers */ 7220 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7221 7222 splx(s); 7223 } 7224 7225 void 7226 opti_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7227 { 7228 struct pciide_channel *cp; 7229 bus_size_t cmdsize, ctlsize; 7230 pcireg_t interface; 7231 u_int8_t init_ctrl; 7232 int channel; 7233 7234 printf(": DMA"); 7235 /* 7236 * XXXSCW: 7237 * There seem to be a couple of buggy revisions/implementations 7238 * of the OPTi pciide chipset. This kludge seems to fix one of 7239 * the reported problems (NetBSD PR/11644) but still fails for the 7240 * other (NetBSD PR/13151), although the latter may be due to other 7241 * issues too... 
7242 */ 7243 if (sc->sc_rev <= 0x12) { 7244 printf(" (disabled)"); 7245 sc->sc_dma_ok = 0; 7246 sc->sc_wdcdev.cap = 0; 7247 } else { 7248 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32; 7249 pciide_mapreg_dma(sc, pa); 7250 } 7251 7252 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE; 7253 sc->sc_wdcdev.PIO_cap = 4; 7254 if (sc->sc_dma_ok) { 7255 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7256 sc->sc_wdcdev.irqack = pciide_irqack; 7257 sc->sc_wdcdev.DMA_cap = 2; 7258 } 7259 sc->sc_wdcdev.set_modes = opti_setup_channel; 7260 7261 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7262 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 7263 7264 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 7265 OPTI_REG_INIT_CONTROL); 7266 7267 interface = PCI_INTERFACE(pa->pa_class); 7268 7269 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 7270 7271 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7272 cp = &sc->pciide_channels[channel]; 7273 if (pciide_chansetup(sc, channel, interface) == 0) 7274 continue; 7275 if (channel == 1 && 7276 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 7277 printf("%s: %s ignored (disabled)\n", 7278 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7279 continue; 7280 } 7281 pciide_map_compat_intr(pa, cp, channel, interface); 7282 if (cp->hw_ok == 0) 7283 continue; 7284 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7285 pciide_pci_intr); 7286 if (cp->hw_ok == 0) { 7287 pciide_unmap_compat_intr(pa, cp, channel, interface); 7288 continue; 7289 } 7290 opti_setup_channel(&cp->wdc_channel); 7291 } 7292 } 7293 7294 void 7295 opti_setup_channel(struct channel_softc *chp) 7296 { 7297 struct ata_drive_datas *drvp; 7298 struct pciide_channel *cp = (struct pciide_channel *)chp; 7299 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7300 int drive, spd; 7301 int mode[2]; 7302 u_int8_t rv, mr; 7303 7304 /* 7305 * The `Delay' and `Address Setup Time' fields of the 7306 * Miscellaneous Register are always zero initially. 7307 */ 7308 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 7309 mr &= ~(OPTI_MISC_DELAY_MASK | 7310 OPTI_MISC_ADDR_SETUP_MASK | 7311 OPTI_MISC_INDEX_MASK); 7312 7313 /* Prime the control register before setting timing values */ 7314 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 7315 7316 /* Determine the clockrate of the PCIbus the chip is attached to */ 7317 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 7318 spd &= OPTI_STRAP_PCI_SPEED_MASK; 7319 7320 /* setup DMA if needed */ 7321 pciide_channel_dma_setup(cp); 7322 7323 for (drive = 0; drive < 2; drive++) { 7324 drvp = &chp->ch_drive[drive]; 7325 /* If no drive, skip */ 7326 if ((drvp->drive_flags & DRIVE) == 0) { 7327 mode[drive] = -1; 7328 continue; 7329 } 7330 7331 if ((drvp->drive_flags & DRIVE_DMA)) { 7332 /* 7333 * Timings will be used for both PIO and DMA, 7334 * so adjust DMA mode if needed 7335 */ 7336 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 7337 drvp->PIO_mode = drvp->DMA_mode + 2; 7338 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 7339 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 7340 drvp->PIO_mode - 2 : 0; 7341 if (drvp->DMA_mode == 0) 7342 drvp->PIO_mode = 0; 7343 7344 mode[drive] = drvp->DMA_mode + 5; 7345 } else 7346 mode[drive] = drvp->PIO_mode; 7347 7348 if (drive && mode[0] >= 0 && 7349 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 7350 /* 7351 * Can't have two drives using different values 7352 * for `Address Setup Time'. 7353 * Slow down the faster drive to compensate. 
7354 */ 7355 int d = (opti_tim_as[spd][mode[0]] > 7356 opti_tim_as[spd][mode[1]]) ? 0 : 1; 7357 7358 mode[d] = mode[1-d]; 7359 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 7360 chp->ch_drive[d].DMA_mode = 0; 7361 chp->ch_drive[d].drive_flags &= DRIVE_DMA; 7362 } 7363 } 7364 7365 for (drive = 0; drive < 2; drive++) { 7366 int m; 7367 if ((m = mode[drive]) < 0) 7368 continue; 7369 7370 /* Set the Address Setup Time and select appropriate index */ 7371 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 7372 rv |= OPTI_MISC_INDEX(drive); 7373 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 7374 7375 /* Set the pulse width and recovery timing parameters */ 7376 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 7377 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 7378 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 7379 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 7380 7381 /* Set the Enhanced Mode register appropriately */ 7382 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 7383 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 7384 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 7385 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 7386 } 7387 7388 /* Finally, enable the timings */ 7389 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 7390 7391 pciide_print_modes(cp); 7392 } 7393 #endif 7394 7395 void 7396 serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7397 { 7398 struct pciide_channel *cp; 7399 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 7400 pcitag_t pcib_tag; 7401 int channel; 7402 bus_size_t cmdsize, ctlsize; 7403 7404 printf(": DMA"); 7405 pciide_mapreg_dma(sc, pa); 7406 printf("\n"); 7407 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7408 WDC_CAPABILITY_MODE; 7409 7410 if (sc->sc_dma_ok) { 7411 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7412 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7413 sc->sc_wdcdev.irqack = pciide_irqack; 7414 } 7415 sc->sc_wdcdev.PIO_cap = 4; 7416 sc->sc_wdcdev.DMA_cap = 2; 7417 switch (sc->sc_pp->ide_product) { 7418 case PCI_PRODUCT_RCC_OSB4_IDE: 7419 sc->sc_wdcdev.UDMA_cap = 2; 7420 break; 7421 case PCI_PRODUCT_RCC_CSB5_IDE: 7422 if (sc->sc_rev < 0x92) 7423 sc->sc_wdcdev.UDMA_cap = 4; 7424 else 7425 sc->sc_wdcdev.UDMA_cap = 5; 7426 break; 7427 case PCI_PRODUCT_RCC_CSB6_IDE: 7428 sc->sc_wdcdev.UDMA_cap = 4; 7429 break; 7430 case PCI_PRODUCT_RCC_CSB6_RAID_IDE: 7431 sc->sc_wdcdev.UDMA_cap = 5; 7432 break; 7433 } 7434 7435 sc->sc_wdcdev.set_modes = serverworks_setup_channel; 7436 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7437 sc->sc_wdcdev.nchannels = 7438 (sc->sc_pp->ide_product == PCI_PRODUCT_RCC_CSB6_IDE ? 
1 : 2); 7439 7440 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7441 cp = &sc->pciide_channels[channel]; 7442 if (pciide_chansetup(sc, channel, interface) == 0) 7443 continue; 7444 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7445 serverworks_pci_intr); 7446 if (cp->hw_ok == 0) 7447 return; 7448 pciide_map_compat_intr(pa, cp, channel, interface); 7449 if (cp->hw_ok == 0) 7450 return; 7451 serverworks_setup_channel(&cp->wdc_channel); 7452 } 7453 7454 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 7455 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 7456 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 7457 } 7458 7459 void 7460 serverworks_setup_channel(struct channel_softc *chp) 7461 { 7462 struct ata_drive_datas *drvp; 7463 struct pciide_channel *cp = (struct pciide_channel *)chp; 7464 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7465 int channel = chp->channel; 7466 int drive, unit; 7467 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 7468 u_int32_t idedma_ctl; 7469 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 7470 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 7471 7472 /* setup DMA if needed */ 7473 pciide_channel_dma_setup(cp); 7474 7475 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 7476 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 7477 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 7478 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 7479 7480 pio_time &= ~(0xffff << (16 * channel)); 7481 dma_time &= ~(0xffff << (16 * channel)); 7482 pio_mode &= ~(0xff << (8 * channel + 16)); 7483 udma_mode &= ~(0xff << (8 * channel + 16)); 7484 udma_mode &= ~(3 << (2 * channel)); 7485 7486 idedma_ctl = 0; 7487 7488 /* Per drive settings */ 7489 for (drive = 0; drive < 2; drive++) { 7490 drvp = &chp->ch_drive[drive]; 7491 /* If no drive, skip */ 7492 if ((drvp->drive_flags & DRIVE) == 0) 7493 continue; 7494 unit = drive + 2 * channel; 7495 /* add timing values, setup DMA if needed */ 7496 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 7497 pio_mode |= drvp->PIO_mode << (4 * unit + 16); 7498 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7499 (drvp->drive_flags & DRIVE_UDMA)) { 7500 /* use Ultra/DMA, check for 80-pin cable */ 7501 if (sc->sc_rev <= 0x92 && drvp->UDMA_mode > 2 && 7502 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 7503 PCI_SUBSYS_ID_REG)) & 7504 (1 << (14 + channel))) == 0) { 7505 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 7506 "cable not detected\n", drvp->drive_name, 7507 sc->sc_wdcdev.sc_dev.dv_xname, 7508 channel, drive), DEBUG_PROBE); 7509 drvp->UDMA_mode = 2; 7510 } 7511 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7512 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 7513 udma_mode |= 1 << unit; 7514 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7515 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7516 (drvp->drive_flags & DRIVE_DMA)) { 7517 /* use Multiword DMA */ 7518 drvp->drive_flags &= ~DRIVE_UDMA; 7519 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7520 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7521 } else { 7522 /* PIO only */ 7523 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7524 } 7525 } 7526 7527 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 7528 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 7529 if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE) 7530 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 7531 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 
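/*
 * Note how the per-drive timing bytes above are shifted by (unit ^ 1):
 * within each channel's 16-bit half of the 0x40/0x44 timing registers
 * the master and slave bytes are apparently stored swapped, while the
 * mode nibbles in 0x48/0x54 are indexed by the plain unit number.
 */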
7532 7533 if (idedma_ctl != 0) { 7534 /* Add software bits in status register */ 7535 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7536 IDEDMA_CTL(channel), idedma_ctl); 7537 } 7538 pciide_print_modes(cp); 7539 } 7540 7541 int 7542 serverworks_pci_intr(void *arg) 7543 { 7544 struct pciide_softc *sc = arg; 7545 struct pciide_channel *cp; 7546 struct channel_softc *wdc_cp; 7547 int rv = 0; 7548 int dmastat, i, crv; 7549 7550 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7551 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7552 IDEDMA_CTL(i)); 7553 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 7554 IDEDMA_CTL_INTR) 7555 continue; 7556 cp = &sc->pciide_channels[i]; 7557 wdc_cp = &cp->wdc_channel; 7558 crv = wdcintr(wdc_cp); 7559 if (crv == 0) { 7560 printf("%s:%d: bogus intr\n", 7561 sc->sc_wdcdev.sc_dev.dv_xname, i); 7562 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7563 IDEDMA_CTL(i), dmastat); 7564 } else 7565 rv = 1; 7566 } 7567 return (rv); 7568 } 7569 7570 void 7571 svwsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7572 { 7573 struct pciide_channel *cp; 7574 pci_intr_handle_t intrhandle; 7575 const char *intrstr; 7576 int channel; 7577 struct pciide_svwsata *ss; 7578 7579 /* Allocate memory for private data */ 7580 sc->sc_cookie = malloc(sizeof(*ss), M_DEVBUF, M_NOWAIT | M_ZERO); 7581 ss = sc->sc_cookie; 7582 7583 /* The 4-port version has a dummy second function. */ 7584 if (pci_conf_read(sc->sc_pc, sc->sc_tag, 7585 PCI_MAPREG_START + 0x14) == 0) { 7586 printf("\n"); 7587 return; 7588 } 7589 7590 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 7591 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 7592 &ss->ba5_st, &ss->ba5_sh, NULL, NULL, 0) != 0) { 7593 printf(": unable to map BA5 register space\n"); 7594 return; 7595 } 7596 7597 printf(": DMA"); 7598 svwsata_mapreg_dma(sc, pa); 7599 printf("\n"); 7600 7601 if (sc->sc_dma_ok) { 7602 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 7603 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7604 sc->sc_wdcdev.irqack = pciide_irqack; 7605 } 7606 sc->sc_wdcdev.PIO_cap = 4; 7607 sc->sc_wdcdev.DMA_cap = 2; 7608 sc->sc_wdcdev.UDMA_cap = 6; 7609 7610 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7611 sc->sc_wdcdev.nchannels = 4; 7612 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7613 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7614 sc->sc_wdcdev.set_modes = sata_setup_channel; 7615 7616 /* We can use SControl and SStatus to probe for drives. */ 7617 sc->sc_wdcdev.drv_probe = svwsata_drv_probe; 7618 7619 /* Map and establish the interrupt handler. */ 7620 if(pci_intr_map(pa, &intrhandle) != 0) { 7621 printf("%s: couldn't map native-PCI interrupt\n", 7622 sc->sc_wdcdev.sc_dev.dv_xname); 7623 return; 7624 } 7625 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 7626 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 7627 pciide_pci_intr, sc, sc->sc_wdcdev.sc_dev.dv_xname); 7628 if (sc->sc_pci_ih != NULL) { 7629 printf("%s: using %s for native-PCI interrupt\n", 7630 sc->sc_wdcdev.sc_dev.dv_xname, 7631 intrstr ? 
intrstr : "unknown interrupt"); 7632 } else { 7633 printf("%s: couldn't establish native-PCI interrupt", 7634 sc->sc_wdcdev.sc_dev.dv_xname); 7635 if (intrstr != NULL) 7636 printf(" at %s", intrstr); 7637 printf("\n"); 7638 return; 7639 } 7640 7641 switch (sc->sc_pp->ide_product) { 7642 case PCI_PRODUCT_RCC_K2_SATA: 7643 bus_space_write_4(ss->ba5_st, ss->ba5_sh, SVWSATA_SICR1, 7644 bus_space_read_4(ss->ba5_st, ss->ba5_sh, SVWSATA_SICR1) 7645 & ~0x00040000); 7646 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7647 SVWSATA_SIM, 0); 7648 break; 7649 } 7650 7651 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7652 cp = &sc->pciide_channels[channel]; 7653 if (pciide_chansetup(sc, channel, 0) == 0) 7654 continue; 7655 svwsata_mapchan(cp); 7656 sata_setup_channel(&cp->wdc_channel); 7657 } 7658 } 7659 7660 void 7661 svwsata_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 7662 { 7663 struct pciide_svwsata *ss = sc->sc_cookie; 7664 7665 sc->sc_wdcdev.dma_arg = sc; 7666 sc->sc_wdcdev.dma_init = pciide_dma_init; 7667 sc->sc_wdcdev.dma_start = pciide_dma_start; 7668 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 7669 7670 /* XXX */ 7671 sc->sc_dma_iot = ss->ba5_st; 7672 sc->sc_dma_ioh = ss->ba5_sh; 7673 7674 sc->sc_dmacmd_read = svwsata_dmacmd_read; 7675 sc->sc_dmacmd_write = svwsata_dmacmd_write; 7676 sc->sc_dmactl_read = svwsata_dmactl_read; 7677 sc->sc_dmactl_write = svwsata_dmactl_write; 7678 sc->sc_dmatbl_write = svwsata_dmatbl_write; 7679 7680 /* DMA registers all set up! */ 7681 sc->sc_dmat = pa->pa_dmat; 7682 sc->sc_dma_ok = 1; 7683 } 7684 7685 u_int8_t 7686 svwsata_dmacmd_read(struct pciide_softc *sc, int chan) 7687 { 7688 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7689 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0))); 7690 } 7691 7692 void 7693 svwsata_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 7694 { 7695 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7696 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0), val); 7697 } 7698 7699 u_int8_t 7700 svwsata_dmactl_read(struct pciide_softc *sc, int chan) 7701 { 7702 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7703 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0))); 7704 } 7705 7706 void 7707 svwsata_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 7708 { 7709 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7710 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0), val); 7711 } 7712 7713 void 7714 svwsata_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 7715 { 7716 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 7717 (chan << 8) + SVWSATA_DMA + IDEDMA_TBL(0), val); 7718 } 7719 7720 void 7721 svwsata_mapchan(struct pciide_channel *cp) 7722 { 7723 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7724 struct channel_softc *wdc_cp = &cp->wdc_channel; 7725 struct pciide_svwsata *ss = sc->sc_cookie; 7726 7727 cp->compat = 0; 7728 cp->ih = sc->sc_pci_ih; 7729 7730 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7731 (wdc_cp->channel << 8) + SVWSATA_TF0, 7732 SVWSATA_TF8 - SVWSATA_TF0, &wdc_cp->cmd_ioh) != 0) { 7733 printf("%s: couldn't map %s cmd regs\n", 7734 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7735 return; 7736 } 7737 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7738 (wdc_cp->channel << 8) + SVWSATA_TF8, 4, 7739 &wdc_cp->ctl_ioh) != 0) { 7740 printf("%s: couldn't map %s ctl regs\n", 7741 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7742 return; 7743 } 7744 wdc_cp->cmd_iot = wdc_cp->ctl_iot = ss->ba5_st; 7745 wdc_cp->_vtbl = &wdc_svwsata_vtbl; 7746 
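/*
 * wdc_svwsata_vtbl points register access at svwsata_read_reg() and
 * svwsata_write_reg() below: the K2's task file is memory-mapped with
 * each register on a 4-byte boundary (hence the "<< 2" and the 32-bit
 * bus_space accesses), so the generic byte-wide accessors would hit
 * the wrong offsets here.
 */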
wdc_cp->ch_flags |= WDCF_DMA_BEFORE_CMD; 7747 wdcattach(wdc_cp); 7748 } 7749 7750 void 7751 svwsata_drv_probe(struct channel_softc *chp) 7752 { 7753 struct pciide_channel *cp = (struct pciide_channel *)chp; 7754 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7755 struct pciide_svwsata *ss = sc->sc_cookie; 7756 int channel = chp->channel; 7757 uint32_t scontrol, sstatus; 7758 uint8_t scnt, sn, cl, ch; 7759 int s; 7760 7761 /* 7762 * Request communication initialization sequence, any speed. 7763 * Performing this is the equivalent of an ATA Reset. 7764 */ 7765 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 7766 7767 /* 7768 * XXX We don't yet support SATA power management; disable all 7769 * power management state transitions. 7770 */ 7771 scontrol |= SControl_IPM_NONE; 7772 7773 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7774 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7775 delay(50 * 1000); 7776 scontrol &= ~SControl_DET_INIT; 7777 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7778 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7779 delay(50 * 1000); 7780 7781 sstatus = bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7782 (channel << 8) + SVWSATA_SSTATUS); 7783 #if 0 7784 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 7785 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 7786 bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7787 (channel << 8) + SVWSATA_SSTATUS)); 7788 #endif 7789 switch (sstatus & SStatus_DET_mask) { 7790 case SStatus_DET_NODEV: 7791 /* No device; be silent. */ 7792 break; 7793 7794 case SStatus_DET_DEV_NE: 7795 printf("%s: port %d: device connected, but " 7796 "communication not established\n", 7797 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7798 break; 7799 7800 case SStatus_DET_OFFLINE: 7801 printf("%s: port %d: PHY offline\n", 7802 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7803 break; 7804 7805 case SStatus_DET_DEV: 7806 /* 7807 * XXX ATAPI detection doesn't currently work. Don't 7808 * XXX know why. But, it's not like the standard method 7809 * XXX can detect an ATAPI device connected via a SATA/PATA 7810 * XXX bridge, so at least this is no worse. --thorpej 7811 */ 7812 if (chp->_vtbl != NULL) 7813 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 7814 else 7815 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 7816 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 7817 delay(10); /* 400ns delay */ 7818 /* Save register contents. */ 7819 if (chp->_vtbl != NULL) { 7820 scnt = CHP_READ_REG(chp, wdr_seccnt); 7821 sn = CHP_READ_REG(chp, wdr_sector); 7822 cl = CHP_READ_REG(chp, wdr_cyl_lo); 7823 ch = CHP_READ_REG(chp, wdr_cyl_hi); 7824 } else { 7825 scnt = bus_space_read_1(chp->cmd_iot, 7826 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 7827 sn = bus_space_read_1(chp->cmd_iot, 7828 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 7829 cl = bus_space_read_1(chp->cmd_iot, 7830 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 7831 ch = bus_space_read_1(chp->cmd_iot, 7832 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 7833 } 7834 #if 0 7835 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7836 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7837 scnt, sn, cl, ch); 7838 #endif 7839 /* 7840 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7841 * cases we get wrong values here, so ignore it. 
7842 */ 7843 s = splbio(); 7844 if (cl == 0x14 && ch == 0xeb) 7845 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7846 else 7847 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7848 splx(s); 7849 7850 printf("%s: port %d: device present", 7851 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7852 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7853 case 1: 7854 printf(", speed: 1.5Gb/s"); 7855 break; 7856 case 2: 7857 printf(", speed: 3.0Gb/s"); 7858 break; 7859 } 7860 printf("\n"); 7861 break; 7862 7863 default: 7864 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7865 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7866 } 7867 } 7868 7869 u_int8_t 7870 svwsata_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7871 { 7872 if (reg & _WDC_AUX) { 7873 return (bus_space_read_4(chp->ctl_iot, chp->ctl_ioh, 7874 (reg & _WDC_REGMASK) << 2)); 7875 } else { 7876 return (bus_space_read_4(chp->cmd_iot, chp->cmd_ioh, 7877 (reg & _WDC_REGMASK) << 2)); 7878 } 7879 } 7880 7881 void 7882 svwsata_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7883 { 7884 if (reg & _WDC_AUX) { 7885 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7886 (reg & _WDC_REGMASK) << 2, val); 7887 } else { 7888 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7889 (reg & _WDC_REGMASK) << 2, val); 7890 } 7891 } 7892 7893 void 7894 svwsata_lba48_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int16_t val) 7895 { 7896 if (reg & _WDC_AUX) { 7897 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7898 (reg & _WDC_REGMASK) << 2, val); 7899 } else { 7900 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7901 (reg & _WDC_REGMASK) << 2, val); 7902 } 7903 } 7904 7905 #define ACARD_IS_850(sc) \ 7906 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 7907 7908 void 7909 acard_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7910 { 7911 struct pciide_channel *cp; 7912 int i; 7913 pcireg_t interface; 7914 bus_size_t cmdsize, ctlsize; 7915 7916 /* 7917 * when the chip is in native mode it identifies itself as a 7918 * 'misc mass storage'. Fake interface in this case. 
7919 */ 7920 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 7921 interface = PCI_INTERFACE(pa->pa_class); 7922 } else { 7923 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 7924 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 7925 } 7926 7927 printf(": DMA"); 7928 pciide_mapreg_dma(sc, pa); 7929 printf("\n"); 7930 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7931 WDC_CAPABILITY_MODE; 7932 7933 if (sc->sc_dma_ok) { 7934 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7935 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7936 sc->sc_wdcdev.irqack = pciide_irqack; 7937 } 7938 sc->sc_wdcdev.PIO_cap = 4; 7939 sc->sc_wdcdev.DMA_cap = 2; 7940 switch (sc->sc_pp->ide_product) { 7941 case PCI_PRODUCT_ACARD_ATP850U: 7942 sc->sc_wdcdev.UDMA_cap = 2; 7943 break; 7944 case PCI_PRODUCT_ACARD_ATP860: 7945 case PCI_PRODUCT_ACARD_ATP860A: 7946 sc->sc_wdcdev.UDMA_cap = 4; 7947 break; 7948 case PCI_PRODUCT_ACARD_ATP865A: 7949 case PCI_PRODUCT_ACARD_ATP865R: 7950 sc->sc_wdcdev.UDMA_cap = 6; 7951 break; 7952 } 7953 7954 sc->sc_wdcdev.set_modes = acard_setup_channel; 7955 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7956 sc->sc_wdcdev.nchannels = 2; 7957 7958 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7959 cp = &sc->pciide_channels[i]; 7960 if (pciide_chansetup(sc, i, interface) == 0) 7961 continue; 7962 if (interface & PCIIDE_INTERFACE_PCI(i)) { 7963 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 7964 &ctlsize, pciide_pci_intr); 7965 } else { 7966 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 7967 &cmdsize, &ctlsize); 7968 } 7969 if (cp->hw_ok == 0) 7970 return; 7971 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 7972 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 7973 wdcattach(&cp->wdc_channel); 7974 acard_setup_channel(&cp->wdc_channel); 7975 } 7976 if (!ACARD_IS_850(sc)) { 7977 u_int32_t reg; 7978 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 7979 reg &= ~ATP860_CTRL_INT; 7980 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 7981 } 7982 } 7983 7984 void 7985 acard_setup_channel(struct channel_softc *chp) 7986 { 7987 struct ata_drive_datas *drvp; 7988 struct pciide_channel *cp = (struct pciide_channel *)chp; 7989 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7990 int channel = chp->channel; 7991 int drive; 7992 u_int32_t idetime, udma_mode; 7993 u_int32_t idedma_ctl; 7994 7995 /* setup DMA if needed */ 7996 pciide_channel_dma_setup(cp); 7997 7998 if (ACARD_IS_850(sc)) { 7999 idetime = 0; 8000 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 8001 udma_mode &= ~ATP850_UDMA_MASK(channel); 8002 } else { 8003 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 8004 idetime &= ~ATP860_SETTIME_MASK(channel); 8005 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 8006 udma_mode &= ~ATP860_UDMA_MASK(channel); 8007 } 8008 8009 idedma_ctl = 0; 8010 8011 /* Per drive settings */ 8012 for (drive = 0; drive < 2; drive++) { 8013 drvp = &chp->ch_drive[drive]; 8014 /* If no drive, skip */ 8015 if ((drvp->drive_flags & DRIVE) == 0) 8016 continue; 8017 /* add timing values, setup DMA if needed */ 8018 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 8019 (drvp->drive_flags & DRIVE_UDMA)) { 8020 /* use Ultra/DMA */ 8021 if (ACARD_IS_850(sc)) { 8022 idetime |= ATP850_SETTIME(drive, 8023 acard_act_udma[drvp->UDMA_mode], 8024 acard_rec_udma[drvp->UDMA_mode]); 8025 udma_mode |= ATP850_UDMA_MODE(channel, drive, 8026 acard_udma_conf[drvp->UDMA_mode]); 8027 } else { 8028 idetime |= 
ATP860_SETTIME(channel, drive, 8029 acard_act_udma[drvp->UDMA_mode], 8030 acard_rec_udma[drvp->UDMA_mode]); 8031 udma_mode |= ATP860_UDMA_MODE(channel, drive, 8032 acard_udma_conf[drvp->UDMA_mode]); 8033 } 8034 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8035 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 8036 (drvp->drive_flags & DRIVE_DMA)) { 8037 /* use Multiword DMA */ 8038 drvp->drive_flags &= ~DRIVE_UDMA; 8039 if (ACARD_IS_850(sc)) { 8040 idetime |= ATP850_SETTIME(drive, 8041 acard_act_dma[drvp->DMA_mode], 8042 acard_rec_dma[drvp->DMA_mode]); 8043 } else { 8044 idetime |= ATP860_SETTIME(channel, drive, 8045 acard_act_dma[drvp->DMA_mode], 8046 acard_rec_dma[drvp->DMA_mode]); 8047 } 8048 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8049 } else { 8050 /* PIO only */ 8051 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 8052 if (ACARD_IS_850(sc)) { 8053 idetime |= ATP850_SETTIME(drive, 8054 acard_act_pio[drvp->PIO_mode], 8055 acard_rec_pio[drvp->PIO_mode]); 8056 } else { 8057 idetime |= ATP860_SETTIME(channel, drive, 8058 acard_act_pio[drvp->PIO_mode], 8059 acard_rec_pio[drvp->PIO_mode]); 8060 } 8061 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 8062 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 8063 | ATP8x0_CTRL_EN(channel)); 8064 } 8065 } 8066 8067 if (idedma_ctl != 0) { 8068 /* Add software bits in status register */ 8069 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8070 IDEDMA_CTL(channel), idedma_ctl); 8071 } 8072 pciide_print_modes(cp); 8073 8074 if (ACARD_IS_850(sc)) { 8075 pci_conf_write(sc->sc_pc, sc->sc_tag, 8076 ATP850_IDETIME(channel), idetime); 8077 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 8078 } else { 8079 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 8080 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 8081 } 8082 } 8083 8084 void 8085 nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8086 { 8087 struct pciide_channel *cp; 8088 int channel; 8089 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8090 bus_size_t cmdsize, ctlsize; 8091 u_int32_t conf; 8092 8093 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8094 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8095 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8096 8097 printf(": DMA"); 8098 pciide_mapreg_dma(sc, pa); 8099 8100 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8101 WDC_CAPABILITY_MODE; 8102 if (sc->sc_dma_ok) { 8103 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8104 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8105 sc->sc_wdcdev.irqack = pciide_irqack; 8106 } 8107 sc->sc_wdcdev.PIO_cap = 4; 8108 sc->sc_wdcdev.DMA_cap = 2; 8109 switch (sc->sc_pp->ide_product) { 8110 case PCI_PRODUCT_NVIDIA_NFORCE_IDE: 8111 sc->sc_wdcdev.UDMA_cap = 5; 8112 break; 8113 default: 8114 sc->sc_wdcdev.UDMA_cap = 6; 8115 } 8116 sc->sc_wdcdev.set_modes = nforce_setup_channel; 8117 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8118 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8119 8120 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8121 8122 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8123 cp = &sc->pciide_channels[channel]; 8124 8125 if (pciide_chansetup(sc, channel, interface) == 0) 8126 continue; 8127 8128 if ((conf & NFORCE_CHAN_EN(channel)) == 0) { 8129 printf("%s: %s ignored (disabled)\n", 8130 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8131 continue; 8132 } 8133 8134 pciide_map_compat_intr(pa, cp, channel, interface); 8135 if (cp->hw_ok == 0) 8136 continue; 8137 
pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8138 nforce_pci_intr); 8139 if (cp->hw_ok == 0) { 8140 pciide_unmap_compat_intr(pa, cp, channel, interface); 8141 continue; 8142 } 8143 8144 if (pciide_chan_candisable(cp)) { 8145 conf &= ~NFORCE_CHAN_EN(channel); 8146 pciide_unmap_compat_intr(pa, cp, channel, interface); 8147 continue; 8148 } 8149 8150 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8151 } 8152 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8153 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8154 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 8155 } 8156 8157 void 8158 nforce_setup_channel(struct channel_softc *chp) 8159 { 8160 struct ata_drive_datas *drvp; 8161 int drive, mode; 8162 u_int32_t idedma_ctl; 8163 struct pciide_channel *cp = (struct pciide_channel *)chp; 8164 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8165 int channel = chp->channel; 8166 u_int32_t conf, piodmatim, piotim, udmatim; 8167 8168 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8169 piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM); 8170 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM); 8171 udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM); 8172 WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, " 8173 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8174 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8175 8176 /* Setup DMA if needed */ 8177 pciide_channel_dma_setup(cp); 8178 8179 /* Clear all bits for this channel */ 8180 idedma_ctl = 0; 8181 piodmatim &= ~NFORCE_PIODMATIM_MASK(channel); 8182 udmatim &= ~NFORCE_UDMATIM_MASK(channel); 8183 8184 /* Per channel settings */ 8185 for (drive = 0; drive < 2; drive++) { 8186 drvp = &chp->ch_drive[drive]; 8187 8188 /* If no drive, skip */ 8189 if ((drvp->drive_flags & DRIVE) == 0) 8190 continue; 8191 8192 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8193 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8194 /* Setup UltraDMA mode */ 8195 drvp->drive_flags &= ~DRIVE_DMA; 8196 8197 udmatim |= NFORCE_UDMATIM_SET(channel, drive, 8198 nforce_udma[drvp->UDMA_mode]) | 8199 NFORCE_UDMA_EN(channel, drive) | 8200 NFORCE_UDMA_ENM(channel, drive); 8201 8202 mode = drvp->PIO_mode; 8203 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8204 (drvp->drive_flags & DRIVE_DMA) != 0) { 8205 /* Setup multiword DMA mode */ 8206 drvp->drive_flags &= ~DRIVE_UDMA; 8207 8208 /* mode = min(pio, dma + 2) */ 8209 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8210 mode = drvp->PIO_mode; 8211 else 8212 mode = drvp->DMA_mode + 2; 8213 } else { 8214 mode = drvp->PIO_mode; 8215 goto pio; 8216 } 8217 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8218 8219 pio: 8220 /* Setup PIO mode */ 8221 if (mode <= 2) { 8222 drvp->DMA_mode = 0; 8223 drvp->PIO_mode = 0; 8224 mode = 0; 8225 } else { 8226 drvp->PIO_mode = mode; 8227 drvp->DMA_mode = mode - 2; 8228 } 8229 piodmatim |= NFORCE_PIODMATIM_SET(channel, drive, 8230 nforce_pio[mode]); 8231 } 8232 8233 if (idedma_ctl != 0) { 8234 /* Add software bits in status register */ 8235 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8236 IDEDMA_CTL(channel), idedma_ctl); 8237 } 8238 8239 WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, " 8240 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8241 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8242 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim); 8243 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim); 8244 8245 
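/*
 * NFORCE_PIOTIM is only read for the debug printout above; the PIO
 * timings actually programmed live in NFORCE_PIODMATIM, so piotim is
 * never written back.
 */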
pciide_print_modes(cp); 8246 } 8247 8248 int 8249 nforce_pci_intr(void *arg) 8250 { 8251 struct pciide_softc *sc = arg; 8252 struct pciide_channel *cp; 8253 struct channel_softc *wdc_cp; 8254 int i, rv, crv; 8255 u_int32_t dmastat; 8256 8257 rv = 0; 8258 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 8259 cp = &sc->pciide_channels[i]; 8260 wdc_cp = &cp->wdc_channel; 8261 8262 /* Skip compat channel */ 8263 if (cp->compat) 8264 continue; 8265 8266 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8267 IDEDMA_CTL(i)); 8268 if ((dmastat & IDEDMA_CTL_INTR) == 0) 8269 continue; 8270 8271 crv = wdcintr(wdc_cp); 8272 if (crv == 0) 8273 printf("%s:%d: bogus intr\n", 8274 sc->sc_wdcdev.sc_dev.dv_xname, i); 8275 else 8276 rv = 1; 8277 } 8278 return (rv); 8279 } 8280 8281 void 8282 artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8283 { 8284 struct pciide_channel *cp; 8285 bus_size_t cmdsize, ctlsize; 8286 pcireg_t interface; 8287 int channel; 8288 8289 printf(": DMA"); 8290 #ifdef PCIIDE_I31244_DISABLEDMA 8291 if (sc->sc_rev == 0) { 8292 printf(" disabled due to rev. 0"); 8293 sc->sc_dma_ok = 0; 8294 } else 8295 #endif 8296 pciide_mapreg_dma(sc, pa); 8297 printf("\n"); 8298 8299 /* 8300 * XXX Configure LEDs to show activity. 8301 */ 8302 8303 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8304 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 8305 sc->sc_wdcdev.PIO_cap = 4; 8306 if (sc->sc_dma_ok) { 8307 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8308 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8309 sc->sc_wdcdev.irqack = pciide_irqack; 8310 sc->sc_wdcdev.DMA_cap = 2; 8311 sc->sc_wdcdev.UDMA_cap = 6; 8312 } 8313 sc->sc_wdcdev.set_modes = sata_setup_channel; 8314 8315 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8316 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8317 8318 interface = PCI_INTERFACE(pa->pa_class); 8319 8320 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8321 cp = &sc->pciide_channels[channel]; 8322 if (pciide_chansetup(sc, channel, interface) == 0) 8323 continue; 8324 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8325 pciide_pci_intr); 8326 if (cp->hw_ok == 0) 8327 continue; 8328 pciide_map_compat_intr(pa, cp, channel, interface); 8329 sata_setup_channel(&cp->wdc_channel); 8330 } 8331 } 8332 8333 void 8334 ite_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8335 { 8336 struct pciide_channel *cp; 8337 int channel; 8338 pcireg_t interface; 8339 bus_size_t cmdsize, ctlsize; 8340 pcireg_t cfg, modectl; 8341 8342 /* 8343 * Fake interface since IT8212F is claimed to be a ``RAID'' device. 
8344 */ 8345 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8346 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8347 8348 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8349 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8350 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8351 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8352 modectl & IT_MODE_MASK), DEBUG_PROBE); 8353 8354 printf(": DMA"); 8355 pciide_mapreg_dma(sc, pa); 8356 8357 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8358 WDC_CAPABILITY_MODE; 8359 if (sc->sc_dma_ok) { 8360 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8361 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8362 sc->sc_wdcdev.irqack = pciide_irqack; 8363 } 8364 sc->sc_wdcdev.PIO_cap = 4; 8365 sc->sc_wdcdev.DMA_cap = 2; 8366 sc->sc_wdcdev.UDMA_cap = 6; 8367 8368 sc->sc_wdcdev.set_modes = ite_setup_channel; 8369 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8370 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8371 8372 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8373 8374 /* Disable RAID */ 8375 modectl &= ~IT_MODE_RAID1; 8376 /* Disable CPU firmware mode */ 8377 modectl &= ~IT_MODE_CPU; 8378 8379 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8380 8381 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8382 cp = &sc->pciide_channels[channel]; 8383 8384 if (pciide_chansetup(sc, channel, interface) == 0) 8385 continue; 8386 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8387 pciide_pci_intr); 8388 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8389 } 8390 8391 /* Re-read configuration registers after channels setup */ 8392 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8393 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8394 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8395 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8396 modectl & IT_MODE_MASK), DEBUG_PROBE); 8397 } 8398 8399 void 8400 ite_setup_channel(struct channel_softc *chp) 8401 { 8402 struct ata_drive_datas *drvp; 8403 int drive, mode; 8404 u_int32_t idedma_ctl; 8405 struct pciide_channel *cp = (struct pciide_channel *)chp; 8406 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8407 int channel = chp->channel; 8408 pcireg_t cfg, modectl; 8409 pcireg_t tim; 8410 8411 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8412 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8413 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_TIM(channel)); 8414 WDCDEBUG_PRINT(("%s:%d: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8415 channel, tim), DEBUG_PROBE); 8416 8417 /* Setup DMA if needed */ 8418 pciide_channel_dma_setup(cp); 8419 8420 /* Clear all bits for this channel */ 8421 idedma_ctl = 0; 8422 8423 /* Per channel settings */ 8424 for (drive = 0; drive < 2; drive++) { 8425 drvp = &chp->ch_drive[drive]; 8426 8427 /* If no drive, skip */ 8428 if ((drvp->drive_flags & DRIVE) == 0) 8429 continue; 8430 8431 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8432 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8433 /* Setup UltraDMA mode */ 8434 drvp->drive_flags &= ~DRIVE_DMA; 8435 modectl &= ~IT_MODE_DMA(channel, drive); 8436 8437 #if 0 8438 /* Check cable, works only in CPU firmware mode */ 8439 if (drvp->UDMA_mode > 2 && 8440 (cfg & IT_CFG_CABLE(channel, drive)) == 0) { 8441 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 8442 "80-wire cable not detected\n", 8443 drvp->drive_name, 8444 sc->sc_wdcdev.sc_dev.dv_xname, 8445 channel, drive), DEBUG_PROBE); 8446 drvp->UDMA_mode = 2; 8447 } 8448 #endif 8449 
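/*
 * The cable check above is left under #if 0: IT_CFG_CABLE is only
 * reported while the IT8212F runs its own CPU firmware, and
 * ite_chip_map() turns that mode off (IT_MODE_CPU is cleared there).
 */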
8450 if (drvp->UDMA_mode >= 5) 8451 tim |= IT_TIM_UDMA5(drive); 8452 else 8453 tim &= ~IT_TIM_UDMA5(drive); 8454 8455 mode = drvp->PIO_mode; 8456 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8457 (drvp->drive_flags & DRIVE_DMA) != 0) { 8458 /* Setup multiword DMA mode */ 8459 drvp->drive_flags &= ~DRIVE_UDMA; 8460 modectl |= IT_MODE_DMA(channel, drive); 8461 8462 /* mode = min(pio, dma + 2) */ 8463 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8464 mode = drvp->PIO_mode; 8465 else 8466 mode = drvp->DMA_mode + 2; 8467 } else { 8468 mode = drvp->PIO_mode; 8469 goto pio; 8470 } 8471 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8472 8473 pio: 8474 /* Setup PIO mode */ 8475 if (mode <= 2) { 8476 drvp->DMA_mode = 0; 8477 drvp->PIO_mode = 0; 8478 mode = 0; 8479 } else { 8480 drvp->PIO_mode = mode; 8481 drvp->DMA_mode = mode - 2; 8482 } 8483 8484 /* Enable IORDY if PIO mode >= 3 */ 8485 if (drvp->PIO_mode >= 3) 8486 cfg |= IT_CFG_IORDY(channel); 8487 } 8488 8489 WDCDEBUG_PRINT(("%s: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8490 tim), DEBUG_PROBE); 8491 8492 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_CFG, cfg); 8493 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8494 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_TIM(channel), tim); 8495 8496 if (idedma_ctl != 0) { 8497 /* Add software bits in status register */ 8498 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8499 IDEDMA_CTL(channel), idedma_ctl); 8500 } 8501 8502 pciide_print_modes(cp); 8503 } 8504 8505 void 8506 ixp_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8507 { 8508 struct pciide_channel *cp; 8509 int channel; 8510 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8511 bus_size_t cmdsize, ctlsize; 8512 8513 printf(": DMA"); 8514 pciide_mapreg_dma(sc, pa); 8515 8516 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8517 WDC_CAPABILITY_MODE; 8518 if (sc->sc_dma_ok) { 8519 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8520 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8521 sc->sc_wdcdev.irqack = pciide_irqack; 8522 } 8523 sc->sc_wdcdev.PIO_cap = 4; 8524 sc->sc_wdcdev.DMA_cap = 2; 8525 sc->sc_wdcdev.UDMA_cap = 6; 8526 8527 sc->sc_wdcdev.set_modes = ixp_setup_channel; 8528 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8529 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8530 8531 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8532 8533 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8534 cp = &sc->pciide_channels[channel]; 8535 if (pciide_chansetup(sc, channel, interface) == 0) 8536 continue; 8537 pciide_map_compat_intr(pa, cp, channel, interface); 8538 if (cp->hw_ok == 0) 8539 continue; 8540 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8541 pciide_pci_intr); 8542 if (cp->hw_ok == 0) { 8543 pciide_unmap_compat_intr(pa, cp, channel, interface); 8544 continue; 8545 } 8546 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8547 } 8548 } 8549 8550 void 8551 ixp_setup_channel(struct channel_softc *chp) 8552 { 8553 struct ata_drive_datas *drvp; 8554 int drive, mode; 8555 u_int32_t idedma_ctl; 8556 struct pciide_channel *cp = (struct pciide_channel*)chp; 8557 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8558 int channel = chp->channel; 8559 pcireg_t udma, mdma_timing, pio, pio_timing; 8560 8561 pio_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING); 8562 pio = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL); 8563 mdma_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING); 8564 udma = pci_conf_read(sc->sc_pc, 
sc->sc_tag, IXP_UDMA_CTL); 8565 8566 /* Setup DMA if needed */ 8567 pciide_channel_dma_setup(cp); 8568 8569 idedma_ctl = 0; 8570 8571 /* Per channel settings */ 8572 for (drive = 0; drive < 2; drive++) { 8573 drvp = &chp->ch_drive[drive]; 8574 8575 /* If no drive, skip */ 8576 if ((drvp->drive_flags & DRIVE) == 0) 8577 continue; 8578 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8579 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8580 /* Setup UltraDMA mode */ 8581 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8582 IXP_UDMA_ENABLE(udma, chp->channel, drive); 8583 IXP_SET_MODE(udma, chp->channel, drive, 8584 drvp->UDMA_mode); 8585 mode = drvp->PIO_mode; 8586 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8587 (drvp->drive_flags & DRIVE_DMA) != 0) { 8588 /* Setup multiword DMA mode */ 8589 drvp->drive_flags &= ~DRIVE_UDMA; 8590 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8591 IXP_UDMA_DISABLE(udma, chp->channel, drive); 8592 IXP_SET_TIMING(mdma_timing, chp->channel, drive, 8593 ixp_mdma_timings[drvp->DMA_mode]); 8594 8595 /* mode = min(pio, dma + 2) */ 8596 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8597 mode = drvp->PIO_mode; 8598 else 8599 mode = drvp->DMA_mode + 2; 8600 } else { 8601 mode = drvp->PIO_mode; 8602 } 8603 8604 /* Setup PIO mode */ 8605 drvp->PIO_mode = mode; 8606 if (mode < 2) 8607 drvp->DMA_mode = 0; 8608 else 8609 drvp->DMA_mode = mode - 2; 8610 /* 8611 * Set PIO mode and timings 8612 * Linux driver avoids PIO mode 1, let's do it too. 8613 */ 8614 if (drvp->PIO_mode == 1) 8615 drvp->PIO_mode = 0; 8616 8617 IXP_SET_MODE(pio, chp->channel, drive, drvp->PIO_mode); 8618 IXP_SET_TIMING(pio_timing, chp->channel, drive, 8619 ixp_pio_timings[drvp->PIO_mode]); 8620 } 8621 8622 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL, udma); 8623 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING, mdma_timing); 8624 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL, pio); 8625 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING, pio_timing); 8626 8627 if (idedma_ctl != 0) { 8628 /* Add software bits in status register */ 8629 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8630 IDEDMA_CTL(channel), idedma_ctl); 8631 } 8632 8633 pciide_print_modes(cp); 8634 } 8635 8636 void 8637 jmicron_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8638 { 8639 struct pciide_channel *cp; 8640 int channel; 8641 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8642 bus_size_t cmdsize, ctlsize; 8643 u_int32_t conf; 8644 8645 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8646 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8647 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8648 8649 printf(": DMA"); 8650 pciide_mapreg_dma(sc, pa); 8651 8652 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8653 WDC_CAPABILITY_MODE; 8654 if (sc->sc_dma_ok) { 8655 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8656 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8657 sc->sc_wdcdev.irqack = pciide_irqack; 8658 } 8659 sc->sc_wdcdev.PIO_cap = 4; 8660 sc->sc_wdcdev.DMA_cap = 2; 8661 sc->sc_wdcdev.UDMA_cap = 6; 8662 sc->sc_wdcdev.set_modes = jmicron_setup_channel; 8663 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8664 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8665 8666 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8667 8668 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8669 cp = &sc->pciide_channels[channel]; 8670 8671 if (pciide_chansetup(sc, channel, interface) == 0) 8672 continue; 8673 8674 #if 0 8675 if ((conf & 
JMICRON_CHAN_EN(channel)) == 0) { 8676 printf("%s: %s ignored (disabled)\n", 8677 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8678 continue; 8679 } 8680 #endif 8681 8682 pciide_map_compat_intr(pa, cp, channel, interface); 8683 if (cp->hw_ok == 0) 8684 continue; 8685 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8686 pciide_pci_intr); 8687 if (cp->hw_ok == 0) { 8688 pciide_unmap_compat_intr(pa, cp, channel, interface); 8689 continue; 8690 } 8691 8692 if (pciide_chan_candisable(cp)) { 8693 conf &= ~JMICRON_CHAN_EN(channel); 8694 pciide_unmap_compat_intr(pa, cp, channel, interface); 8695 continue; 8696 } 8697 8698 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8699 } 8700 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8701 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8702 pci_conf_write(sc->sc_pc, sc->sc_tag, JMICRON_CONF, conf); 8703 } 8704 8705 void 8706 jmicron_setup_channel(struct channel_softc *chp) 8707 { 8708 struct ata_drive_datas *drvp; 8709 int drive, mode; 8710 u_int32_t idedma_ctl; 8711 struct pciide_channel *cp = (struct pciide_channel *)chp; 8712 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8713 int channel = chp->channel; 8714 u_int32_t conf; 8715 8716 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8717 8718 /* Setup DMA if needed */ 8719 pciide_channel_dma_setup(cp); 8720 8721 /* Clear all bits for this channel */ 8722 idedma_ctl = 0; 8723 8724 /* Per channel settings */ 8725 for (drive = 0; drive < 2; drive++) { 8726 drvp = &chp->ch_drive[drive]; 8727 8728 /* If no drive, skip */ 8729 if ((drvp->drive_flags & DRIVE) == 0) 8730 continue; 8731 8732 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8733 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8734 /* Setup UltraDMA mode */ 8735 drvp->drive_flags &= ~DRIVE_DMA; 8736 8737 /* see if cable is up to scratch */ 8738 if ((conf & JMICRON_CONF_40PIN) && 8739 (drvp->UDMA_mode > 2)) 8740 drvp->UDMA_mode = 2; 8741 8742 mode = drvp->PIO_mode; 8743 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8744 (drvp->drive_flags & DRIVE_DMA) != 0) { 8745 /* Setup multiword DMA mode */ 8746 drvp->drive_flags &= ~DRIVE_UDMA; 8747 8748 /* mode = min(pio, dma + 2) */ 8749 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8750 mode = drvp->PIO_mode; 8751 else 8752 mode = drvp->DMA_mode + 2; 8753 } else { 8754 mode = drvp->PIO_mode; 8755 goto pio; 8756 } 8757 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8758 8759 pio: 8760 /* Setup PIO mode */ 8761 if (mode <= 2) { 8762 drvp->DMA_mode = 0; 8763 drvp->PIO_mode = 0; 8764 } else { 8765 drvp->PIO_mode = mode; 8766 drvp->DMA_mode = mode - 2; 8767 } 8768 } 8769 8770 if (idedma_ctl != 0) { 8771 /* Add software bits in status register */ 8772 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8773 IDEDMA_CTL(channel), idedma_ctl); 8774 } 8775 8776 pciide_print_modes(cp); 8777 } 8778 8779 void 8780 phison_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8781 { 8782 struct pciide_channel *cp; 8783 int channel; 8784 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8785 bus_size_t cmdsize, ctlsize; 8786 8787 sc->chip_unmap = default_chip_unmap; 8788 8789 printf(": DMA"); 8790 pciide_mapreg_dma(sc, pa); 8791 8792 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8793 WDC_CAPABILITY_MODE; 8794 if (sc->sc_dma_ok) { 8795 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8796 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8797 sc->sc_wdcdev.irqack = pciide_irqack; 8798 } 8799 sc->sc_wdcdev.PIO_cap = 4; 8800 sc->sc_wdcdev.DMA_cap = 2; 8801 
sc->sc_wdcdev.UDMA_cap = 5; 8802 sc->sc_wdcdev.set_modes = phison_setup_channel; 8803 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8804 sc->sc_wdcdev.nchannels = 1; 8805 8806 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8807 8808 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8809 cp = &sc->pciide_channels[channel]; 8810 8811 if (pciide_chansetup(sc, channel, interface) == 0) 8812 continue; 8813 8814 pciide_map_compat_intr(pa, cp, channel, interface); 8815 if (cp->hw_ok == 0) 8816 continue; 8817 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8818 pciide_pci_intr); 8819 if (cp->hw_ok == 0) { 8820 pciide_unmap_compat_intr(pa, cp, channel, interface); 8821 continue; 8822 } 8823 8824 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8825 } 8826 } 8827 8828 void 8829 phison_setup_channel(struct channel_softc *chp) 8830 { 8831 struct ata_drive_datas *drvp; 8832 int drive, mode; 8833 u_int32_t idedma_ctl; 8834 struct pciide_channel *cp = (struct pciide_channel *)chp; 8835 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8836 int channel = chp->channel; 8837 8838 /* Setup DMA if needed */ 8839 pciide_channel_dma_setup(cp); 8840 8841 /* Clear all bits for this channel */ 8842 idedma_ctl = 0; 8843 8844 /* Per channel settings */ 8845 for (drive = 0; drive < 2; drive++) { 8846 drvp = &chp->ch_drive[drive]; 8847 8848 /* If no drive, skip */ 8849 if ((drvp->drive_flags & DRIVE) == 0) 8850 continue; 8851 8852 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8853 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8854 /* Setup UltraDMA mode */ 8855 drvp->drive_flags &= ~DRIVE_DMA; 8856 mode = drvp->PIO_mode; 8857 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8858 (drvp->drive_flags & DRIVE_DMA) != 0) { 8859 /* Setup multiword DMA mode */ 8860 drvp->drive_flags &= ~DRIVE_UDMA; 8861 8862 /* mode = min(pio, dma + 2) */ 8863 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8864 mode = drvp->PIO_mode; 8865 else 8866 mode = drvp->DMA_mode + 2; 8867 } else { 8868 mode = drvp->PIO_mode; 8869 goto pio; 8870 } 8871 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8872 8873 pio: 8874 /* Setup PIO mode */ 8875 if (mode <= 2) { 8876 drvp->DMA_mode = 0; 8877 drvp->PIO_mode = 0; 8878 } else { 8879 drvp->PIO_mode = mode; 8880 drvp->DMA_mode = mode - 2; 8881 } 8882 } 8883 8884 if (idedma_ctl != 0) { 8885 /* Add software bits in status register */ 8886 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8887 IDEDMA_CTL(channel), idedma_ctl); 8888 } 8889 8890 pciide_print_modes(cp); 8891 } 8892 8893 void 8894 sch_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8895 { 8896 struct pciide_channel *cp; 8897 int channel; 8898 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8899 bus_size_t cmdsize, ctlsize; 8900 8901 printf(": DMA"); 8902 pciide_mapreg_dma(sc, pa); 8903 8904 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8905 WDC_CAPABILITY_MODE; 8906 if (sc->sc_dma_ok) { 8907 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8908 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8909 sc->sc_wdcdev.irqack = pciide_irqack; 8910 } 8911 sc->sc_wdcdev.PIO_cap = 4; 8912 sc->sc_wdcdev.DMA_cap = 2; 8913 sc->sc_wdcdev.UDMA_cap = 5; 8914 sc->sc_wdcdev.set_modes = sch_setup_channel; 8915 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8916 sc->sc_wdcdev.nchannels = 1; 8917 8918 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8919 8920 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8921 cp = &sc->pciide_channels[channel]; 8922 8923 
if (pciide_chansetup(sc, channel, interface) == 0) 8924 continue; 8925 8926 pciide_map_compat_intr(pa, cp, channel, interface); 8927 if (cp->hw_ok == 0) 8928 continue; 8929 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8930 pciide_pci_intr); 8931 if (cp->hw_ok == 0) { 8932 pciide_unmap_compat_intr(pa, cp, channel, interface); 8933 continue; 8934 } 8935 8936 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8937 } 8938 } 8939 8940 void 8941 sch_setup_channel(struct channel_softc *chp) 8942 { 8943 struct ata_drive_datas *drvp; 8944 int drive, mode; 8945 u_int32_t tim, timaddr; 8946 struct pciide_channel *cp = (struct pciide_channel *)chp; 8947 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8948 8949 /* Setup DMA if needed */ 8950 pciide_channel_dma_setup(cp); 8951 8952 /* Per channel settings */ 8953 for (drive = 0; drive < 2; drive++) { 8954 drvp = &chp->ch_drive[drive]; 8955 8956 /* If no drive, skip */ 8957 if ((drvp->drive_flags & DRIVE) == 0) 8958 continue; 8959 8960 timaddr = (drive == 0) ? SCH_D0TIM : SCH_D1TIM; 8961 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, timaddr); 8962 tim &= ~SCH_TIM_MASK; 8963 8964 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8965 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8966 /* Setup UltraDMA mode */ 8967 drvp->drive_flags &= ~DRIVE_DMA; 8968 8969 mode = drvp->PIO_mode; 8970 tim |= (drvp->UDMA_mode << 16) | SCH_TIM_SYNCDMA; 8971 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8972 (drvp->drive_flags & DRIVE_DMA) != 0) { 8973 /* Setup multiword DMA mode */ 8974 drvp->drive_flags &= ~DRIVE_UDMA; 8975 8976 tim &= ~SCH_TIM_SYNCDMA; 8977 8978 /* mode = min(pio, dma + 2) */ 8979 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8980 mode = drvp->PIO_mode; 8981 else 8982 mode = drvp->DMA_mode + 2; 8983 } else { 8984 mode = drvp->PIO_mode; 8985 goto pio; 8986 } 8987 8988 pio: 8989 /* Setup PIO mode */ 8990 if (mode <= 2) { 8991 drvp->DMA_mode = 0; 8992 drvp->PIO_mode = 0; 8993 } else { 8994 drvp->PIO_mode = mode; 8995 drvp->DMA_mode = mode - 2; 8996 } 8997 tim |= (drvp->DMA_mode << 8) | (drvp->PIO_mode); 8998 pci_conf_write(sc->sc_pc, sc->sc_tag, timaddr, tim); 8999 } 9000 9001 pciide_print_modes(cp); 9002 } 9003
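/*
 * Minimal illustrative sketch, not part of the driver (hence #if 0):
 * the PIO/multiword-DMA clamp that the setup_channel routines above
 * (nforce, ite, jmicron, phison, sch) all apply before programming
 * timings.  The DMA timing is derived from the same "mode" value as
 * PIO, hence mode = min(PIO, DMA + 2); e.g. a drive reporting PIO 4
 * and MDMA 1 ends up at mode 3, i.e. PIO 3 with MDMA 1.
 */
#if 0
static int
pciide_example_clamp_mode(int pio_mode, int dma_mode)
{
	int mode;

	/* mode = min(pio, dma + 2) */
	if (pio_mode <= (dma_mode + 2))
		mode = pio_mode;
	else
		mode = dma_mode + 2;

	/*
	 * Modes 0-2 collapse to the slowest setting; the callers above
	 * then clear both PIO_mode and DMA_mode.
	 */
	if (mode <= 2)
		mode = 0;

	return (mode);
}
#endif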