/*	$OpenBSD: pciide.c,v 1.357 2015/12/21 20:52:33 mmcc Exp $	*/
/*	$NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou
 *      for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10

#ifdef WDCDEBUG
#ifndef WDCDEBUG_PCIIDE_MASK
#define WDCDEBUG_PCIIDE_MASK 0x00
#endif
int wdcdebug_pciide_mask = WDCDEBUG_PCIIDE_MASK;
#define WDCDEBUG_PRINT(args, level) do {		\
	if ((wdcdebug_pciide_mask & (level)) != 0)	\
		printf args;				\
} while (0)
#else
#define WDCDEBUG_PRINT(args, level)
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/endian.h>

#include <machine/bus.h>

#include <dev/ata/atavar.h>
#include <dev/ata/satareg.h>
#include <dev/ic/wdcreg.h>
#include <dev/ic/wdcvar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_sii3112_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/pciide_acard_reg.h>
#include <dev/pci/pciide_natsemi_reg.h>
#include <dev/pci/pciide_nforce_reg.h>
#include <dev/pci/pciide_ite_reg.h>
#include <dev/pci/pciide_ixp_reg.h>
#include <dev/pci/pciide_svwsata_reg.h>
#include <dev/pci/pciide_jmicron_reg.h>
#include <dev/pci/pciide_rdc_reg.h>
#include <dev/pci/cy82c693var.h>

/* functions for reading/writing 8-bit PCI registers */

u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t, int);
void pciide_pci_write(pci_chipset_tag_t, pcitag_t, int, u_int8_t);

u_int8_t
pciide_pci_read(pci_chipset_tag_t pc, pcitag_t pa, int reg)
{
	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

void
pciide_pci_write(pci_chipset_tag_t pc, pcitag_t pa, int reg, u_int8_t val)
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}
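
/*
 * Note: PCI configuration space is accessed in aligned 32-bit dwords, so
 * the two helpers above read-modify-write the containing dword and
 * shift/mask the requested byte lane.  For example (illustrative only),
 * pciide_pci_read(pc, tag, 0x41) fetches the dword at 0x40 and returns
 * bits 15:8.
 */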

void default_chip_map(struct pciide_softc *, struct pci_attach_args *);

void sata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sata_setup_channel(struct channel_softc *);

void piix_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piixsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piix_setup_channel(struct channel_softc *);
void piix3_4_setup_channel(struct channel_softc *);
void piix_timing_debug(struct pciide_softc *);

u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t);
u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas *);
u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t);

void amd756_chip_map(struct pciide_softc *, struct pci_attach_args *);
void amd756_setup_channel(struct channel_softc *);

void apollo_chip_map(struct pciide_softc *, struct pci_attach_args *);
void apollo_setup_channel(struct channel_softc *);

void cmd_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd0643_9_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd0643_9_setup_channel(struct channel_softc *);
void cmd680_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd680_setup_channel(struct channel_softc *);
void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int);
void cmd_channel_map(struct pci_attach_args *, struct pciide_softc *, int);
int cmd_pci_intr(void *);
void cmd646_9_irqack(struct channel_softc *);

void sii_fixup_cacheline(struct pciide_softc *, struct pci_attach_args *);
void sii3112_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sii3112_setup_channel(struct channel_softc *);
void sii3112_drv_probe(struct channel_softc *);
void sii3114_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sii3114_mapreg_dma(struct pciide_softc *, struct pci_attach_args *);
int sii3114_chansetup(struct pciide_softc *, int);
void sii3114_mapchan(struct pciide_channel *);
u_int8_t sii3114_dmacmd_read(struct pciide_softc *, int);
void sii3114_dmacmd_write(struct pciide_softc *, int, u_int8_t);
u_int8_t sii3114_dmactl_read(struct pciide_softc *, int);
void sii3114_dmactl_write(struct pciide_softc *, int, u_int8_t);
void sii3114_dmatbl_write(struct pciide_softc *, int, u_int32_t);

void cy693_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cy693_setup_channel(struct channel_softc *);

void sis_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sis_setup_channel(struct channel_softc *);
void sis96x_setup_channel(struct channel_softc *);
int sis_hostbr_match(struct pci_attach_args *);
int sis_south_match(struct pci_attach_args *);

void natsemi_chip_map(struct pciide_softc *, struct pci_attach_args *);
void natsemi_setup_channel(struct channel_softc *);
int natsemi_pci_intr(void *);
void natsemi_irqack(struct channel_softc *);
void ns_scx200_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ns_scx200_setup_channel(struct channel_softc *);

void acer_chip_map(struct pciide_softc *, struct pci_attach_args *);
void acer_setup_channel(struct channel_softc *);
int acer_pci_intr(void *);
int acer_dma_init(void *, int, int, void *, size_t, int);

void pdc202xx_chip_map(struct pciide_softc *, struct pci_attach_args *);
void pdc202xx_setup_channel(struct channel_softc *);
void pdc20268_setup_channel(struct channel_softc *);
int pdc202xx_pci_intr(void *);
int pdc20265_pci_intr(void *);
void pdc20262_dma_start(void *, int, int);
int pdc20262_dma_finish(void *, int, int, int);

u_int8_t pdc268_config_read(struct channel_softc *, int);

void pdcsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void pdc203xx_setup_channel(struct channel_softc *);
int pdc203xx_pci_intr(void *);
void pdc203xx_irqack(struct channel_softc *);
void pdc203xx_dma_start(void *, int, int);
int pdc203xx_dma_finish(void *, int, int, int);
int pdc205xx_pci_intr(void *);
void pdc205xx_do_reset(struct channel_softc *);
void pdc205xx_drv_probe(struct channel_softc *);

void opti_chip_map(struct pciide_softc *, struct pci_attach_args *);
void opti_setup_channel(struct channel_softc *);

void hpt_chip_map(struct pciide_softc *, struct pci_attach_args *);
void hpt_setup_channel(struct channel_softc *);
int hpt_pci_intr(void *);

void acard_chip_map(struct pciide_softc *, struct pci_attach_args *);
void acard_setup_channel(struct channel_softc *);

void serverworks_chip_map(struct pciide_softc *, struct pci_attach_args *);
void serverworks_setup_channel(struct channel_softc *);
int serverworks_pci_intr(void *);

void svwsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void svwsata_mapreg_dma(struct pciide_softc *, struct pci_attach_args *);
void svwsata_mapchan(struct pciide_channel *);
u_int8_t svwsata_dmacmd_read(struct pciide_softc *, int);
void svwsata_dmacmd_write(struct pciide_softc *, int, u_int8_t);
u_int8_t svwsata_dmactl_read(struct pciide_softc *, int);
void svwsata_dmactl_write(struct pciide_softc *, int, u_int8_t);
void svwsata_dmatbl_write(struct pciide_softc *, int, u_int32_t);
void svwsata_drv_probe(struct channel_softc *);

void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *);
void nforce_setup_channel(struct channel_softc *);
int nforce_pci_intr(void *);

void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *);

void ite_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ite_setup_channel(struct channel_softc *);

void ixp_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ixp_setup_channel(struct channel_softc *);

void jmicron_chip_map(struct pciide_softc *, struct pci_attach_args *);
void jmicron_setup_channel(struct channel_softc *);

void phison_chip_map(struct pciide_softc *, struct pci_attach_args *);
void phison_setup_channel(struct channel_softc *);

void sch_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sch_setup_channel(struct channel_softc *);

void rdc_chip_map(struct pciide_softc *, struct pci_attach_args *);
void rdc_setup_channel(struct channel_softc *);

struct pciide_product_desc {
	u_int32_t ide_product;
	u_short ide_flags;
	/* map and setup chip, probe drives */
	void (*chip_map)(struct pciide_softc *, struct pci_attach_args *);
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
#define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARS ignore upper word */

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,	/* Generic PCI IDE controller */
	0,
	default_chip_map
};
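
/*
 * Note: IDE_PCI_CLASS_OVERRIDE lets pciide_match() accept a device listed
 * in the tables below even though it does not advertise the PCI IDE class
 * code (e.g. the Promise, HighPoint, Acard and ITE parts, which register
 * as RAID or misc mass storage); IDE_16BIT_IOSPACE makes
 * pciide_mapreg_dma() reject bus-master register addresses above 64KB.
 */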

const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_31244, 0, artisea_chip_map },	/* Intel 31244 SATA */
	{ PCI_PRODUCT_INTEL_82092AA, 0, default_chip_map },	/* Intel 82092AA IDE */
	{ PCI_PRODUCT_INTEL_82371FB_IDE, 0, piix_chip_map },	/* Intel 82371FB IDE (PIIX) */
	{ PCI_PRODUCT_INTEL_82371FB_ISA, 0, piix_chip_map },	/* Intel 82371FB IDE (PIIX) */
	{ PCI_PRODUCT_INTEL_82372FB_IDE, 0, piix_chip_map },	/* Intel 82372FB IDE (PIIX4) */
	{ PCI_PRODUCT_INTEL_82371SB_IDE, 0, piix_chip_map },	/* Intel 82371SB IDE (PIIX3) */
	{ PCI_PRODUCT_INTEL_82371AB_IDE, 0, piix_chip_map },	/* Intel 82371AB IDE (PIIX4) */
	{ PCI_PRODUCT_INTEL_82371MX, 0, piix_chip_map },	/* Intel 82371MX IDE */
	{ PCI_PRODUCT_INTEL_82440MX_IDE, 0, piix_chip_map },	/* Intel 82440MX IDE */
	{ PCI_PRODUCT_INTEL_82451NX, 0, piix_chip_map },	/* Intel 82451NX (PIIX4) IDE */
	{ PCI_PRODUCT_INTEL_82801AA_IDE, 0, piix_chip_map },	/* Intel 82801AA IDE (ICH) */
	{ PCI_PRODUCT_INTEL_82801AB_IDE, 0, piix_chip_map },	/* Intel 82801AB IDE (ICH0) */
	{ PCI_PRODUCT_INTEL_82801BAM_IDE, 0, piix_chip_map },	/* Intel 82801BAM IDE (ICH2) */
	{ PCI_PRODUCT_INTEL_82801BA_IDE, 0, piix_chip_map },	/* Intel 82801BA IDE (ICH2) */
	{ PCI_PRODUCT_INTEL_82801CAM_IDE, 0, piix_chip_map },	/* Intel 82801CAM IDE (ICH3) */
	{ PCI_PRODUCT_INTEL_82801CA_IDE, 0, piix_chip_map },	/* Intel 82801CA IDE (ICH3) */
	{ PCI_PRODUCT_INTEL_82801DB_IDE, 0, piix_chip_map },	/* Intel 82801DB IDE (ICH4) */
	{ PCI_PRODUCT_INTEL_82801DBL_IDE, 0, piix_chip_map },	/* Intel 82801DBL IDE (ICH4-L) */
	{ PCI_PRODUCT_INTEL_82801DBM_IDE, 0, piix_chip_map },	/* Intel 82801DBM IDE (ICH4-M) */
	{ PCI_PRODUCT_INTEL_82801EB_IDE, 0, piix_chip_map },	/* Intel 82801EB/ER (ICH5/5R) IDE */
	{ PCI_PRODUCT_INTEL_82801EB_SATA, 0, piixsata_chip_map },	/* Intel 82801EB (ICH5) SATA */
	{ PCI_PRODUCT_INTEL_82801ER_SATA, 0, piixsata_chip_map },	/* Intel 82801ER (ICH5R) SATA */
	{ PCI_PRODUCT_INTEL_6300ESB_IDE, 0, piix_chip_map },	/* Intel 6300ESB IDE */
	{ PCI_PRODUCT_INTEL_6300ESB_SATA, 0, piixsata_chip_map },	/* Intel 6300ESB SATA */
	{ PCI_PRODUCT_INTEL_6300ESB_SATA2, 0, piixsata_chip_map },	/* Intel 6300ESB SATA */
	{ PCI_PRODUCT_INTEL_6321ESB_IDE, 0, piix_chip_map },	/* Intel 6321ESB IDE */
	{ PCI_PRODUCT_INTEL_82801FB_IDE, 0, piix_chip_map },	/* Intel 82801FB (ICH6) IDE */
	{ PCI_PRODUCT_INTEL_82801FBM_SATA, 0, piixsata_chip_map },	/* Intel 82801FBM (ICH6M) SATA */
	{ PCI_PRODUCT_INTEL_82801FB_SATA, 0, piixsata_chip_map },	/* Intel 82801FB (ICH6) SATA */
	{ PCI_PRODUCT_INTEL_82801FR_SATA, 0, piixsata_chip_map },	/* Intel 82801FR (ICH6R) SATA */
	{ PCI_PRODUCT_INTEL_82801GB_IDE, 0, piix_chip_map },	/* Intel 82801GB (ICH7) IDE */
	{ PCI_PRODUCT_INTEL_82801GB_SATA, 0, piixsata_chip_map },	/* Intel 82801GB (ICH7) SATA */
	{ PCI_PRODUCT_INTEL_82801GR_AHCI, 0, piixsata_chip_map },	/* Intel 82801GR (ICH7R) AHCI */
	{ PCI_PRODUCT_INTEL_82801GR_RAID, 0, piixsata_chip_map },	/* Intel 82801GR (ICH7R) RAID */
	{ PCI_PRODUCT_INTEL_82801GBM_SATA, 0, piixsata_chip_map },	/* Intel 82801GBM (ICH7M) SATA */
	{ PCI_PRODUCT_INTEL_82801GBM_AHCI, 0, piixsata_chip_map },	/* Intel 82801GBM (ICH7M) AHCI */
	{ PCI_PRODUCT_INTEL_82801GHM_RAID, 0, piixsata_chip_map },	/* Intel 82801GHM (ICH7M DH) RAID */
	{ PCI_PRODUCT_INTEL_82801H_SATA_1, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) SATA */
	{ PCI_PRODUCT_INTEL_82801H_AHCI_6P, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) AHCI */
	{ PCI_PRODUCT_INTEL_82801H_RAID, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) RAID */
	{ PCI_PRODUCT_INTEL_82801H_AHCI_4P, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) AHCI */
	{ PCI_PRODUCT_INTEL_82801H_SATA_2, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) SATA */
	{ PCI_PRODUCT_INTEL_82801HBM_SATA, 0, piixsata_chip_map },	/* Intel 82801HBM (ICH8M) SATA */
	{ PCI_PRODUCT_INTEL_82801HBM_AHCI, 0, piixsata_chip_map },	/* Intel 82801HBM (ICH8M) AHCI */
	{ PCI_PRODUCT_INTEL_82801HBM_RAID, 0, piixsata_chip_map },	/* Intel 82801HBM (ICH8M) RAID */
	{ PCI_PRODUCT_INTEL_82801HBM_IDE, 0, piix_chip_map },	/* Intel 82801HBM (ICH8M) IDE */
	{ PCI_PRODUCT_INTEL_82801I_SATA_1, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_2, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_3, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_4, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_5, 0, piixsata_chip_map },	/* Intel 82801I (ICH9M) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_6, 0, piixsata_chip_map },	/* Intel 82801I (ICH9M) SATA */
	{ PCI_PRODUCT_INTEL_82801JD_SATA_1, 0, piixsata_chip_map },	/* Intel 82801JD (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_82801JD_SATA_2, 0, piixsata_chip_map },	/* Intel 82801JD (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_82801JI_SATA_1, 0, piixsata_chip_map },	/* Intel 82801JI (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_82801JI_SATA_2, 0, piixsata_chip_map },	/* Intel 82801JI (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_6321ESB_SATA, 0, piixsata_chip_map },	/* Intel 6321ESB SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_1, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_2, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_3, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_4, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_5, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_6, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_C600_SATA, 0, piixsata_chip_map },	/* Intel C600 SATA */
	{ PCI_PRODUCT_INTEL_C610_SATA_1, 0, piixsata_chip_map },	/* Intel C610 SATA */
	{ PCI_PRODUCT_INTEL_C610_SATA_2, 0, piixsata_chip_map },	/* Intel C610 SATA */
	{ PCI_PRODUCT_INTEL_C610_SATA_3, 0, piixsata_chip_map },	/* Intel C610 SATA */
	{ PCI_PRODUCT_INTEL_6SERIES_SATA_1, 0, piixsata_chip_map },	/* Intel 6 Series SATA */
	{ PCI_PRODUCT_INTEL_6SERIES_SATA_2, 0, piixsata_chip_map },	/* Intel 6 Series SATA */
	{ PCI_PRODUCT_INTEL_6SERIES_SATA_3, 0, piixsata_chip_map },	/* Intel 6 Series SATA */
	{ PCI_PRODUCT_INTEL_6SERIES_SATA_4, 0, piixsata_chip_map },	/* Intel 6 Series SATA */
	{ PCI_PRODUCT_INTEL_7SERIES_SATA_1, 0, piixsata_chip_map },	/* Intel 7 Series SATA */
	{ PCI_PRODUCT_INTEL_7SERIES_SATA_2, 0, piixsata_chip_map },	/* Intel 7 Series SATA */
	{ PCI_PRODUCT_INTEL_7SERIES_SATA_3, 0, piixsata_chip_map },	/* Intel 7 Series SATA */
	{ PCI_PRODUCT_INTEL_7SERIES_SATA_4, 0, piixsata_chip_map },	/* Intel 7 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_SATA_1, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_SATA_2, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_SATA_3, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_SATA_4, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_LP_SATA_1, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_LP_SATA_2, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_LP_SATA_3, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_LP_SATA_4, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_9SERIES_SATA_1, 0, piixsata_chip_map },	/* Intel 9 Series SATA */
	{ PCI_PRODUCT_INTEL_9SERIES_SATA_2, 0, piixsata_chip_map },	/* Intel 9 Series SATA */
	{ PCI_PRODUCT_INTEL_ATOMC2000_SATA_1, 0, piixsata_chip_map },	/* Intel Atom C2000 SATA */
	{ PCI_PRODUCT_INTEL_ATOMC2000_SATA_2, 0, piixsata_chip_map },	/* Intel Atom C2000 SATA */
	{ PCI_PRODUCT_INTEL_ATOMC2000_SATA_3, 0, piixsata_chip_map },	/* Intel Atom C2000 SATA */
	{ PCI_PRODUCT_INTEL_ATOMC2000_SATA_4, 0, piixsata_chip_map },	/* Intel Atom C2000 SATA */
	{ PCI_PRODUCT_INTEL_BAYTRAIL_SATA_1, 0, piixsata_chip_map },	/* Intel Baytrail SATA */
	{ PCI_PRODUCT_INTEL_BAYTRAIL_SATA_2, 0, piixsata_chip_map },	/* Intel Baytrail SATA */
	{ PCI_PRODUCT_INTEL_EP80579_SATA, 0, piixsata_chip_map },	/* Intel EP80579 SATA */
	{ PCI_PRODUCT_INTEL_DH8900_SATA_1, 0, piixsata_chip_map },	/* Intel DH8900 SATA */
	{ PCI_PRODUCT_INTEL_DH8900_SATA_2, 0, piixsata_chip_map },	/* Intel DH8900 SATA */
	{ PCI_PRODUCT_INTEL_SCH_IDE, 0, sch_chip_map }		/* Intel SCH IDE */
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE, 0, amd756_chip_map },	/* AMD 756 */
	{ PCI_PRODUCT_AMD_766_IDE, 0, amd756_chip_map },	/* AMD 766 */
	{ PCI_PRODUCT_AMD_PBC768_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_8111_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_CS5536_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_HUDSON2_IDE, 0, ixp_chip_map }
};

#ifdef notyet
const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621, 0, opti_chip_map },
	{ PCI_PRODUCT_OPTI_82C568, 0, opti_chip_map },
	{ PCI_PRODUCT_OPTI_82D568, 0, opti_chip_map }
};
#endif

const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640, 0, cmd_chip_map },		/* CMD Technology PCI0640 */
	{ PCI_PRODUCT_CMDTECH_643, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0643 */
	{ PCI_PRODUCT_CMDTECH_646, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0646 */
	{ PCI_PRODUCT_CMDTECH_648, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0648 */
	{ PCI_PRODUCT_CMDTECH_649, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0649 */
	{ PCI_PRODUCT_CMDTECH_680, IDE_PCI_CLASS_OVERRIDE, cmd680_chip_map },	/* CMD Technology PCI0680 */
	{ PCI_PRODUCT_CMDTECH_3112, 0, sii3112_chip_map },	/* SiI3112 SATA */
	{ PCI_PRODUCT_CMDTECH_3512, 0, sii3112_chip_map },	/* SiI3512 SATA */
	{ PCI_PRODUCT_CMDTECH_AAR_1210SA, 0, sii3112_chip_map },	/* Adaptec AAR-1210SA */
	{ PCI_PRODUCT_CMDTECH_3114, 0, sii3114_chip_map }	/* SiI3114 SATA */
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C416, 0, apollo_chip_map },	/* VIA VT82C416 IDE */
	{ PCI_PRODUCT_VIATECH_VT82C571, 0, apollo_chip_map },	/* VIA VT82C571 IDE */
	{ PCI_PRODUCT_VIATECH_VT6410, IDE_PCI_CLASS_OVERRIDE, apollo_chip_map },	/* VIA VT6410 IDE */
	{ PCI_PRODUCT_VIATECH_VT6415, IDE_PCI_CLASS_OVERRIDE, apollo_chip_map },	/* VIA VT6415 IDE */
	{ PCI_PRODUCT_VIATECH_CX700_IDE, 0, apollo_chip_map },	/* VIA CX700 IDE */
	{ PCI_PRODUCT_VIATECH_VX700_IDE, 0, apollo_chip_map },	/* VIA VX700 IDE */
	{ PCI_PRODUCT_VIATECH_VX855_IDE, 0, apollo_chip_map },	/* VIA VX855 IDE */
	{ PCI_PRODUCT_VIATECH_VX900_IDE, 0, apollo_chip_map },	/* VIA VX900 IDE */
	{ PCI_PRODUCT_VIATECH_VT6420_SATA, 0, sata_chip_map },	/* VIA VT6420 SATA */
	{ PCI_PRODUCT_VIATECH_VT6421_SATA, 0, sata_chip_map },	/* VIA VT6421 SATA */
	{ PCI_PRODUCT_VIATECH_VT8237A_SATA, 0, sata_chip_map },	/* VIA VT8237A SATA */
	{ PCI_PRODUCT_VIATECH_VT8237A_SATA_2, 0, sata_chip_map },	/* VIA VT8237A SATA */
	{ PCI_PRODUCT_VIATECH_VT8237S_SATA, 0, sata_chip_map },	/* VIA VT8237S SATA */
	{ PCI_PRODUCT_VIATECH_VT8251_SATA, 0, sata_chip_map }	/* VIA VT8251 SATA */
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693, IDE_16BIT_IOSPACE, cy693_chip_map }	/* Contaq CY82C693 IDE */
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5513, 0, sis_chip_map },	/* SIS 5513 EIDE */
	{ PCI_PRODUCT_SIS_180, 0, sata_chip_map },	/* SIS 180 SATA */
	{ PCI_PRODUCT_SIS_181, 0, sata_chip_map },	/* SIS 181 SATA */
	{ PCI_PRODUCT_SIS_182, 0, sata_chip_map },	/* SIS 182 SATA */
	{ PCI_PRODUCT_SIS_1183, 0, sata_chip_map }	/* SIS 1183 SATA */
};

/*
 * The National/AMD CS5535 requires MSRs to set DMA/PIO modes so it
 * has been banished to the MD i386 pciide_machdep
 */
const struct pciide_product_desc pciide_natsemi_products[] = {
#ifdef __i386__
	{ PCI_PRODUCT_NS_CS5535_IDE, 0, gcsc_chip_map },	/* National/AMD CS5535 IDE */
#endif
	{ PCI_PRODUCT_NS_PC87415, 0, natsemi_chip_map },	/* National Semi PC87415 IDE */
	{ PCI_PRODUCT_NS_SCx200_IDE, 0, ns_scx200_chip_map }	/* National Semi SCx200 IDE */
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229, 0, acer_chip_map }	/* Acer Labs M5229 UDMA IDE */
};

const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT36x/37x IDE */
	{ PCI_PRODUCT_TRIONES_HPT372A, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT372A IDE */
	{ PCI_PRODUCT_TRIONES_HPT302, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT302 IDE */
	{ PCI_PRODUCT_TRIONES_HPT371, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT371 IDE */
	{ PCI_PRODUCT_TRIONES_HPT374, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map }	/* Highpoint HPT374 IDE */
};

const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_PDC20246, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20262, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20265, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20267, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20268, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20268R, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20269, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20271, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20275, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20276, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20277, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20318, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20319, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20371, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20375, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20376, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20377, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20378, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20379, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40518, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40519, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40718, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40719, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40779, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20571, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20575, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20579, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20771, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20775, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map }
};

const struct pciide_product_desc pciide_acard_products[] = {
	{ PCI_PRODUCT_ACARD_ATP850U, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP850U Ultra33 Controller */
	{ PCI_PRODUCT_ACARD_ATP860, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP860 Ultra66 Controller */
	{ PCI_PRODUCT_ACARD_ATP860A, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP860-A Ultra66 Controller */
	{ PCI_PRODUCT_ACARD_ATP865A, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP865-A Ultra133 Controller */
	{ PCI_PRODUCT_ACARD_ATP865R, IDE_PCI_CLASS_OVERRIDE, acard_chip_map }	/* Acard ATP865-R Ultra133 Controller */
};

const struct pciide_product_desc pciide_serverworks_products[] = {
	{ PCI_PRODUCT_RCC_OSB4_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB5_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB6_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB6_RAID_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_K2_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_FRODO4_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_FRODO8_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_SATA_1, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_SATA_2, 0, svwsata_chip_map }
};

const struct pciide_product_desc pciide_nvidia_products[] = {
	{ PCI_PRODUCT_NVIDIA_NFORCE_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_400_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_ATA133, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP73_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP77_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_400_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_SATA1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA_2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA_3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA_4, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA_2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA_3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA_4, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP77_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_4, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP89_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP89_SATA_2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP89_SATA_3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP89_SATA_4, 0, sata_chip_map }
};

const struct pciide_product_desc pciide_ite_products[] = {
	{ PCI_PRODUCT_ITEXPRESS_IT8211F, IDE_PCI_CLASS_OVERRIDE, ite_chip_map },
	{ PCI_PRODUCT_ITEXPRESS_IT8212F, IDE_PCI_CLASS_OVERRIDE, ite_chip_map }
};

const struct pciide_product_desc pciide_ati_products[] = {
	{ PCI_PRODUCT_ATI_SB200_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB300_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB400_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB600_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB700_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB300_SATA, 0, sii3112_chip_map },
	{ PCI_PRODUCT_ATI_SB400_SATA_1, 0, sii3112_chip_map },
	{ PCI_PRODUCT_ATI_SB400_SATA_2, 0, sii3112_chip_map }
};

const struct pciide_product_desc pciide_jmicron_products[] = {
	{ PCI_PRODUCT_JMICRON_JMB361, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB363, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB365, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB366, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB368, 0, jmicron_chip_map }
};

const struct pciide_product_desc pciide_phison_products[] = {
	{ PCI_PRODUCT_PHISON_PS5000, 0, phison_chip_map },
};

const struct pciide_product_desc pciide_rdc_products[] = {
	{ PCI_PRODUCT_RDC_R1012_IDE, 0, rdc_chip_map },
};

struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
	int ide_nproducts;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products,
	  nitems(pciide_intel_products) },
	{ PCI_VENDOR_AMD, pciide_amd_products,
	  nitems(pciide_amd_products) },
#ifdef notyet
	{ PCI_VENDOR_OPTI, pciide_opti_products,
	  nitems(pciide_opti_products) },
#endif
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products,
	  nitems(pciide_cmd_products) },
	{ PCI_VENDOR_VIATECH, pciide_via_products,
	  nitems(pciide_via_products) },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products,
	  nitems(pciide_cypress_products) },
	{ PCI_VENDOR_SIS, pciide_sis_products,
	  nitems(pciide_sis_products) },
	{ PCI_VENDOR_NS, pciide_natsemi_products,
	  nitems(pciide_natsemi_products) },
	{ PCI_VENDOR_ALI, pciide_acer_products,
	  nitems(pciide_acer_products) },
	{ PCI_VENDOR_TRIONES, pciide_triones_products,
	  nitems(pciide_triones_products) },
	{ PCI_VENDOR_ACARD, pciide_acard_products,
	  nitems(pciide_acard_products) },
	{ PCI_VENDOR_RCC, pciide_serverworks_products,
	  nitems(pciide_serverworks_products) },
	{ PCI_VENDOR_PROMISE, pciide_promise_products,
	  nitems(pciide_promise_products) },
	{ PCI_VENDOR_NVIDIA, pciide_nvidia_products,
	  nitems(pciide_nvidia_products) },
	{ PCI_VENDOR_ITEXPRESS, pciide_ite_products,
	  nitems(pciide_ite_products) },
	{ PCI_VENDOR_ATI, pciide_ati_products,
	  nitems(pciide_ati_products) },
	{ PCI_VENDOR_JMICRON, pciide_jmicron_products,
	  nitems(pciide_jmicron_products) },
	{ PCI_VENDOR_PHISON, pciide_phison_products,
	  nitems(pciide_phison_products) },
	{ PCI_VENDOR_RDC, pciide_rdc_products,
	  nitems(pciide_rdc_products) }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01

int pciide_match(struct device *, void *, void *);
void pciide_attach(struct device *, struct device *, void *);
int pciide_detach(struct device *, int);
int pciide_activate(struct device *, int);

struct cfattach pciide_pci_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach,
	pciide_detach, pciide_activate
};

struct cfattach pciide_jmb_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach,
	pciide_detach, pciide_activate
};

struct cfdriver pciide_cd = {
	NULL, "pciide", DV_DULL
};

const struct pciide_product_desc *pciide_lookup_product(u_int32_t);

const struct pciide_product_desc *
pciide_lookup_product(u_int32_t id)
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;
	int i;

	for (i = 0, vp = pciide_vendors; i < nitems(pciide_vendors); vp++, i++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if (i == nitems(pciide_vendors))
		return (NULL);

	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (i == vp->ide_nproducts)
		return (NULL);
	return (pp);
}
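
/*
 * Note: the lookup above is two-level -- first the vendor is found in
 * pciide_vendors[], then that vendor's product list is scanned; a NULL
 * result makes pciide_attach() fall back to default_product_desc and
 * default_chip_map.
 */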

int
pciide_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Some IDE controllers have severe bugs when used in PCI mode.
	 * We punt and attach them to the ISA bus instead.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
		return (0);

	/*
	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
	 * controllers.  Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE))
		return (1);

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE) {
		switch (PCI_SUBCLASS(pa->pa_class)) {
		case PCI_SUBCLASS_MASS_STORAGE_IDE:
			return (1);

		/*
		 * We only match these if we know they have
		 * a match, as we may not support native interfaces
		 * on them.
		 */
		case PCI_SUBCLASS_MASS_STORAGE_SATA:
		case PCI_SUBCLASS_MASS_STORAGE_RAID:
		case PCI_SUBCLASS_MASS_STORAGE_MISC:
			if (pp)
				return (1);
			else
				return (0);
			break;
		}
	}

	return (0);
}

void
pciide_attach(struct device *parent, struct device *self, void *aux)
{
	struct pciide_softc *sc = (struct pciide_softc *)self;
	struct pci_attach_args *pa = aux;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL)
		sc->sc_pp = &default_product_desc;
	sc->sc_rev = PCI_REVISION(pa->pa_class);

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Set up DMA defaults; these might be adjusted by chip_map. */
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;

	sc->sc_dmacmd_read = pciide_dmacmd_read;
	sc->sc_dmacmd_write = pciide_dmacmd_write;
	sc->sc_dmactl_read = pciide_dmactl_read;
	sc->sc_dmactl_write = pciide_dmactl_write;
	sc->sc_dmatbl_write = pciide_dmatbl_write;

	WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc,
	    sc->sc_tag, pa->pa_class), DEBUG_PROBE);

	sc->sc_pp->chip_map(sc, pa);

	WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG)),
	    DEBUG_PROBE);
}

int
pciide_detach(struct device *self, int flags)
{
	struct pciide_softc *sc = (struct pciide_softc *)self;

	if (sc->chip_unmap == NULL)
		panic("unmap not yet implemented for this chipset");
	else
		sc->chip_unmap(sc, flags);

	return 0;
}
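
/*
 * Note: on suspend (after the children have been suspended) the generic
 * loop below saves the device-dependent config dwords starting at
 * PCI_MAPREG_END + 0x18 (i.e. 0x40), where most of these chips keep their
 * timing registers; sc_save2[] holds the few chip-specific extras.  On
 * resume they are written back before the children are reactivated.
 */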

int
pciide_activate(struct device *self, int act)
{
	int rv = 0;
	struct pciide_softc *sc = (struct pciide_softc *)self;
	int i;

	switch (act) {
	case DVACT_SUSPEND:
		rv = config_activate_children(self, act);

		for (i = 0; i < nitems(sc->sc_save); i++)
			sc->sc_save[i] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, PCI_MAPREG_END + 0x18 + (i * 4));

		if (sc->sc_pp->chip_map == sch_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SCH_D0TIM);
			sc->sc_save2[1] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SCH_D1TIM);
		} else if (sc->sc_pp->chip_map == piixsata_chip_map) {
			sc->sc_save2[0] = pciide_pci_read(sc->sc_pc,
			    sc->sc_tag, ICH5_SATA_MAP);
			sc->sc_save2[1] = pciide_pci_read(sc->sc_pc,
			    sc->sc_tag, ICH5_SATA_PI);
			sc->sc_save2[2] = pciide_pci_read(sc->sc_pc,
			    sc->sc_tag, ICH_SATA_PCS);
		} else if (sc->sc_pp->chip_map == sii3112_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SII3112_SCS_CMD);
			sc->sc_save2[1] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SII3112_PCI_CFGCTL);
		} else if (sc->sc_pp->chip_map == ite_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, IT_TIM(0));
		} else if (sc->sc_pp->chip_map == nforce_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, NFORCE_PIODMATIM);
			sc->sc_save2[1] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, NFORCE_PIOTIM);
			sc->sc_save2[2] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, NFORCE_UDMATIM);
		}
		break;
	case DVACT_RESUME:
		for (i = 0; i < nitems(sc->sc_save); i++)
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PCI_MAPREG_END + 0x18 + (i * 4),
			    sc->sc_save[i]);

		if (sc->sc_pp->chip_map == default_chip_map ||
		    sc->sc_pp->chip_map == sata_chip_map ||
		    sc->sc_pp->chip_map == piix_chip_map ||
		    sc->sc_pp->chip_map == amd756_chip_map ||
		    sc->sc_pp->chip_map == phison_chip_map ||
		    sc->sc_pp->chip_map == rdc_chip_map ||
		    sc->sc_pp->chip_map == ixp_chip_map ||
		    sc->sc_pp->chip_map == acard_chip_map ||
		    sc->sc_pp->chip_map == apollo_chip_map ||
		    sc->sc_pp->chip_map == sis_chip_map) {
			/* nothing to restore -- uses only 0x40 - 0x56 */
		} else if (sc->sc_pp->chip_map == sch_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SCH_D0TIM, sc->sc_save2[0]);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SCH_D1TIM, sc->sc_save2[1]);
		} else if (sc->sc_pp->chip_map == piixsata_chip_map) {
			pciide_pci_write(sc->sc_pc, sc->sc_tag,
			    ICH5_SATA_MAP, sc->sc_save2[0]);
			pciide_pci_write(sc->sc_pc, sc->sc_tag,
			    ICH5_SATA_PI, sc->sc_save2[1]);
			pciide_pci_write(sc->sc_pc, sc->sc_tag,
			    ICH_SATA_PCS, sc->sc_save2[2]);
		} else if (sc->sc_pp->chip_map == sii3112_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SII3112_SCS_CMD, sc->sc_save2[0]);
			delay(50 * 1000);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SII3112_PCI_CFGCTL, sc->sc_save2[1]);
			delay(50 * 1000);
		} else if (sc->sc_pp->chip_map == ite_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    IT_TIM(0), sc->sc_save2[0]);
		} else if (sc->sc_pp->chip_map == nforce_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    NFORCE_PIODMATIM, sc->sc_save2[0]);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    NFORCE_PIOTIM, sc->sc_save2[1]);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    NFORCE_UDMATIM, sc->sc_save2[2]);
		} else {
			printf("%s: restore for unknown chip map %x\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    sc->sc_pp->ide_product);
		}

		rv = config_activate_children(self, act);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

int
pciide_mapregs_compat(struct pci_attach_args *pa, struct pciide_channel *cp,
    int compatchan, bus_size_t *cmdsizep, bus_size_t *ctlsizep)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	pcireg_t csr;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MASTER_ENABLE);

	wdc_cp->cmd_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}
	wdc_cp->cmd_iosz = *cmdsizep;
	wdc_cp->ctl_iosz = *ctlsizep;

	return (1);
}

int
pciide_unmapregs_compat(struct pciide_softc *sc, struct pciide_channel *cp)
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz);
	bus_space_unmap(wdc_cp->ctl_iot, wdc_cp->ctl_ioh, wdc_cp->ctl_iosz);

	if (sc->sc_pci_ih != NULL) {
		pciide_machdep_compat_intr_disestablish(sc->sc_pc,
		    sc->sc_pci_ih);
		sc->sc_pci_ih = NULL;
	}

	return (0);
}

int
pciide_mapregs_native(struct pci_attach_args *pa, struct pciide_channel *cp,
    bus_size_t *cmdsizep, bus_size_t *ctlsizep, int (*pci_intr)(void *))
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;
	pcireg_t maptype;

	cp->compat = 0;

	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return (0);
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc,
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return (0);
		}
	}
	cp->ih = sc->sc_pci_ih;
	sc->sc_pc = pa->pa_pc;

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CMD_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CTL_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O": "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return (0);
	}
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register, the control register is at offset 2.  Pass the generic
	 * code a handle for only one byte at the right offset.
	 */
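	/*
	 * Note: cp->ctl_baseioh (the handle for the full 4-byte region) is
	 * kept around on purpose; pciide_unmapregs_native() below unmaps the
	 * whole control space with it rather than with the one-byte
	 * sub-region handle.
	 */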
	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdc_cp->ctl_ioh) != 0) {
		printf("%s: unable to subregion %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
		return (0);
	}
	wdc_cp->cmd_iosz = *cmdsizep;
	wdc_cp->ctl_iosz = *ctlsizep;

	return (1);
}

int
pciide_unmapregs_native(struct pciide_softc *sc, struct pciide_channel *cp)
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz);

	/* Unmap the whole control space, not just the sub-region */
	bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, wdc_cp->ctl_iosz);

	if (sc->sc_pci_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih);
		sc->sc_pci_ih = NULL;
	}

	return (0);
}

void
pciide_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done.  If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space", some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
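	/*
	 * Note: the switch below covers both cases described above; the
	 * PCI_MAPREG_TYPE_IO case only validates the address and then falls
	 * through, so the I/O and 32-bit memory BARs share the same
	 * pci_mapreg_map() call.
	 */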

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", unused (registers at unsafe address %#lx)", addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, &sc->sc_dma_iosz,
		    0) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", (unsupported maptype 0x%x)", maptype);
		break;
	}
}

void
pciide_unmapreg_dma(struct pciide_softc *sc)
{
	bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, sc->sc_dma_iosz);
}

int
pciide_intr_flag(struct pciide_channel *cp)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int chan = cp->wdc_channel.channel;

	if (cp->dma_in_progress) {
		int retry = 10;
		int status;

		/* Check the status register */
		for (retry = 10; retry > 0; retry--) {
			status = PCIIDE_DMACTL_READ(sc, chan);
			if (status & IDEDMA_CTL_INTR) {
				break;
			}
			DELAY(5);
		}

		/* Not for us. */
		if (retry == 0)
			return (0);

		return (1);
	}

	return (-1);
}
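
/*
 * Note on the return values above: 1 means a DMA transfer was in progress
 * on this channel and its interrupt bit is set (the interrupt is ours),
 * 0 means DMA was in progress but the bit never appeared (not ours), and
 * -1 means no DMA was pending so ownership cannot be decided here; the
 * callers below only bail out on 0.
 */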

int
pciide_compat_intr(void *arg)
{
	struct pciide_channel *cp = arg;

	if (pciide_intr_flag(cp) == 0)
		return (0);

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}

int
pciide_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel skip. */
		if (cp->compat)
			continue;

		if (cp->hw_ok == 0)
			continue;

		if (pciide_intr_flag(cp) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

u_int8_t
pciide_dmacmd_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan)));
}

void
pciide_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan), val);
}

u_int8_t
pciide_dmactl_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan)));
}

void
pciide_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan), val);
}

void
pciide_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val)
{
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL(chan), val);
}

void
pciide_channel_dma_setup(struct pciide_channel *cp)
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

int
pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive)
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return (0);

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
	    seg.ds_addr), DEBUG_PROBE);
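	/*
	 * Note: the table allocated above holds NIDEDMA_TABLES physical
	 * region descriptors, one per DMA segment, which is why the
	 * transfer map created below allows at most NIDEDMA_TABLES
	 * segments of sc_dma_maxsegsz bytes each.
	 */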
bus_dmamap_create(sc->sc_dmat, dma_table_size, 2077 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 2078 &dma_maps->dmamap_table)) != 0) { 2079 printf("%s:%d: unable to create table DMA map for " 2080 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2081 channel, drive, error); 2082 return (error); 2083 } 2084 if ((error = bus_dmamap_load(sc->sc_dmat, 2085 dma_maps->dmamap_table, 2086 dma_maps->dma_table, 2087 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 2088 printf("%s:%d: unable to load table DMA map for " 2089 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2090 channel, drive, error); 2091 return (error); 2092 } 2093 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 2094 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE); 2095 /* Create a xfer DMA map for this drive */ 2096 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 2097 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary, 2098 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 2099 &dma_maps->dmamap_xfer)) != 0) { 2100 printf("%s:%d: unable to create xfer DMA map for " 2101 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2102 channel, drive, error); 2103 return (error); 2104 } 2105 return (0); 2106 } 2107 2108 int 2109 pciide_dma_init(void *v, int channel, int drive, void *databuf, 2110 size_t datalen, int flags) 2111 { 2112 struct pciide_softc *sc = v; 2113 int error, seg; 2114 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2115 struct pciide_dma_maps *dma_maps = 2116 &sc->pciide_channels[channel].dma_maps[drive]; 2117 #ifndef BUS_DMA_RAW 2118 #define BUS_DMA_RAW 0 2119 #endif 2120 2121 error = bus_dmamap_load(sc->sc_dmat, 2122 dma_maps->dmamap_xfer, 2123 databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW); 2124 if (error) { 2125 printf("%s:%d: unable to load xfer DMA map for " 2126 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2127 channel, drive, error); 2128 return (error); 2129 } 2130 2131 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 2132 dma_maps->dmamap_xfer->dm_mapsize, 2133 (flags & WDC_DMA_READ) ? 2134 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2135 2136 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 2137 #ifdef DIAGNOSTIC 2138 /* A segment must not cross a 64k boundary */ 2139 { 2140 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 2141 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 2142 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 2143 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 2144 printf("pciide_dma: segment %d physical addr 0x%lx" 2145 " len 0x%lx not properly aligned\n", 2146 seg, phys, len); 2147 panic("pciide_dma: buf align"); 2148 } 2149 } 2150 #endif 2151 dma_maps->dma_table[seg].base_addr = 2152 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 2153 dma_maps->dma_table[seg].byte_count = 2154 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 2155 IDEDMA_BYTE_COUNT_MASK); 2156 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 2157 seg, letoh32(dma_maps->dma_table[seg].byte_count), 2158 letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 2159 2160 } 2161 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 2162 htole32(IDEDMA_BYTE_COUNT_EOT); 2163 2164 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 2165 dma_maps->dmamap_table->dm_mapsize, 2166 BUS_DMASYNC_PREWRITE); 2167 2168 /* Maps are ready. 
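(At this point the physical region descriptor table holds one little-endian { base_addr, byte_count } pair per DMA segment, with IDEDMA_BYTE_COUNT_EOT marking the last entry; the code below hands its physical address to the bus-master engine and latches the transfer direction.)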
Start DMA function */ 2169 #ifdef DIAGNOSTIC 2170 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 2171 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 2172 dma_maps->dmamap_table->dm_segs[0].ds_addr); 2173 panic("pciide_dma_init: table align"); 2174 } 2175 #endif 2176 2177 /* Clear status bits */ 2178 PCIIDE_DMACTL_WRITE(sc, channel, PCIIDE_DMACTL_READ(sc, channel)); 2179 /* Write table addr */ 2180 PCIIDE_DMATBL_WRITE(sc, channel, 2181 dma_maps->dmamap_table->dm_segs[0].ds_addr); 2182 /* set read/write */ 2183 PCIIDE_DMACMD_WRITE(sc, channel, 2184 ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd); 2185 /* remember flags */ 2186 dma_maps->dma_flags = flags; 2187 return (0); 2188 } 2189 2190 void 2191 pciide_dma_start(void *v, int channel, int drive) 2192 { 2193 struct pciide_softc *sc = v; 2194 2195 WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS); 2196 PCIIDE_DMACMD_WRITE(sc, channel, PCIIDE_DMACMD_READ(sc, channel) | 2197 IDEDMA_CMD_START); 2198 2199 sc->pciide_channels[channel].dma_in_progress = 1; 2200 } 2201 2202 int 2203 pciide_dma_finish(void *v, int channel, int drive, int force) 2204 { 2205 struct pciide_softc *sc = v; 2206 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2207 u_int8_t status; 2208 int error = 0; 2209 struct pciide_dma_maps *dma_maps = 2210 &sc->pciide_channels[channel].dma_maps[drive]; 2211 2212 status = PCIIDE_DMACTL_READ(sc, channel); 2213 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 2214 DEBUG_XFERS); 2215 if (status == 0xff) 2216 return (status); 2217 2218 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) { 2219 error = WDC_DMAST_NOIRQ; 2220 goto done; 2221 } 2222 2223 /* stop DMA channel */ 2224 PCIIDE_DMACMD_WRITE(sc, channel, 2225 ((dma_maps->dma_flags & WDC_DMA_READ) ? 2226 0x00 : IDEDMA_CMD_WRITE) | cp->idedma_cmd); 2227 2228 /* Unload the map of the data buffer */ 2229 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 2230 dma_maps->dmamap_xfer->dm_mapsize, 2231 (dma_maps->dma_flags & WDC_DMA_READ) ? 
2232 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 2233 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 2234 2235 /* Clear status bits */ 2236 PCIIDE_DMACTL_WRITE(sc, channel, status); 2237 2238 if ((status & IDEDMA_CTL_ERR) != 0) { 2239 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 2240 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 2241 error |= WDC_DMAST_ERR; 2242 } 2243 2244 if ((status & IDEDMA_CTL_INTR) == 0) { 2245 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 2246 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 2247 drive, status); 2248 error |= WDC_DMAST_NOIRQ; 2249 } 2250 2251 if ((status & IDEDMA_CTL_ACT) != 0) { 2252 /* data underrun, may be a valid condition for ATAPI */ 2253 error |= WDC_DMAST_UNDER; 2254 } 2255 2256 done: 2257 sc->pciide_channels[channel].dma_in_progress = 0; 2258 return (error); 2259 } 2260 2261 void 2262 pciide_irqack(struct channel_softc *chp) 2263 { 2264 struct pciide_channel *cp = (struct pciide_channel *)chp; 2265 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2266 int chan = chp->channel; 2267 2268 /* clear status bits in IDE DMA registers */ 2269 PCIIDE_DMACTL_WRITE(sc, chan, PCIIDE_DMACTL_READ(sc, chan)); 2270 } 2271 2272 /* some common code used by several chip_map */ 2273 int 2274 pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface) 2275 { 2276 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2277 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2278 cp->name = PCIIDE_CHANNEL_NAME(channel); 2279 cp->wdc_channel.channel = channel; 2280 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2281 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 2282 if (cp->wdc_channel.ch_queue == NULL) { 2283 printf("%s: %s " 2284 "cannot allocate channel queue", 2285 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2286 return (0); 2287 } 2288 cp->hw_ok = 1; 2289 2290 return (1); 2291 } 2292 2293 void 2294 pciide_chanfree(struct pciide_softc *sc, int channel) 2295 { 2296 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2297 if (cp->wdc_channel.ch_queue) 2298 wdc_free_queue(cp->wdc_channel.ch_queue); 2299 } 2300 2301 /* some common code used by several chip channel_map */ 2302 void 2303 pciide_mapchan(struct pci_attach_args *pa, struct pciide_channel *cp, 2304 pcireg_t interface, bus_size_t *cmdsizep, bus_size_t *ctlsizep, 2305 int (*pci_intr)(void *)) 2306 { 2307 struct channel_softc *wdc_cp = &cp->wdc_channel; 2308 2309 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 2310 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 2311 pci_intr); 2312 else 2313 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2314 wdc_cp->channel, cmdsizep, ctlsizep); 2315 if (cp->hw_ok == 0) 2316 return; 2317 wdc_cp->data32iot = wdc_cp->cmd_iot; 2318 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 2319 wdcattach(wdc_cp); 2320 } 2321 2322 void 2323 pciide_unmap_chan(struct pciide_softc *sc, struct pciide_channel *cp, int flags) 2324 { 2325 struct channel_softc *wdc_cp = &cp->wdc_channel; 2326 2327 wdcdetach(wdc_cp, flags); 2328 2329 if (cp->compat != 0) 2330 pciide_unmapregs_compat(sc, cp); 2331 else 2332 pciide_unmapregs_native(sc, cp); 2333 } 2334 2335 /* 2336 * Generic code to call to know if a channel can be disabled. 
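(A channel on which no drives were probed can safely be switched off in the controller's per-channel enable register; the chip_map routines below call this before clearing such enable bits.)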
Return 1 2337 * if channel can be disabled, 0 if not 2338 */ 2339 int 2340 pciide_chan_candisable(struct pciide_channel *cp) 2341 { 2342 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2343 struct channel_softc *wdc_cp = &cp->wdc_channel; 2344 2345 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 2346 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 2347 printf("%s: %s disabled (no drives)\n", 2348 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2349 cp->hw_ok = 0; 2350 return (1); 2351 } 2352 return (0); 2353 } 2354 2355 /* 2356 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 2357 * Set hw_ok=0 on failure 2358 */ 2359 void 2360 pciide_map_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2361 int compatchan, int interface) 2362 { 2363 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2364 struct channel_softc *wdc_cp = &cp->wdc_channel; 2365 2366 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2367 return; 2368 2369 cp->compat = 1; 2370 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 2371 pa, compatchan, pciide_compat_intr, cp); 2372 if (cp->ih == NULL) { 2373 printf("%s: no compatibility interrupt for use by %s\n", 2374 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2375 cp->hw_ok = 0; 2376 } 2377 } 2378 2379 /* 2380 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel. 2381 * Set hw_ok=0 on failure 2382 */ 2383 void 2384 pciide_unmap_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2385 int compatchan, int interface) 2386 { 2387 struct channel_softc *wdc_cp = &cp->wdc_channel; 2388 2389 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2390 return; 2391 2392 pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih); 2393 } 2394 2395 void 2396 pciide_print_channels(int nchannels, pcireg_t interface) 2397 { 2398 int i; 2399 2400 for (i = 0; i < nchannels; i++) { 2401 printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i), 2402 (interface & PCIIDE_INTERFACE_SETTABLE(i)) ? 2403 "configured" : "wired", 2404 (interface & PCIIDE_INTERFACE_PCI(i)) ? 
"native-PCI" : 2405 "compatibility"); 2406 } 2407 2408 printf("\n"); 2409 } 2410 2411 void 2412 pciide_print_modes(struct pciide_channel *cp) 2413 { 2414 wdc_print_current_modes(&cp->wdc_channel); 2415 } 2416 2417 void 2418 default_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2419 { 2420 struct pciide_channel *cp; 2421 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2422 pcireg_t csr; 2423 int channel, drive; 2424 struct ata_drive_datas *drvp; 2425 u_int8_t idedma_ctl; 2426 bus_size_t cmdsize, ctlsize; 2427 char *failreason; 2428 2429 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2430 printf(": DMA"); 2431 if (sc->sc_pp == &default_product_desc && 2432 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 2433 PCIIDE_OPTIONS_DMA) == 0) { 2434 printf(" (unsupported)"); 2435 sc->sc_dma_ok = 0; 2436 } else { 2437 pciide_mapreg_dma(sc, pa); 2438 if (sc->sc_dma_ok != 0) 2439 printf(", (partial support)"); 2440 } 2441 } else { 2442 printf(": no DMA"); 2443 sc->sc_dma_ok = 0; 2444 } 2445 if (sc->sc_dma_ok) { 2446 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2447 sc->sc_wdcdev.irqack = pciide_irqack; 2448 } 2449 sc->sc_wdcdev.PIO_cap = 0; 2450 sc->sc_wdcdev.DMA_cap = 0; 2451 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2452 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2453 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 2454 2455 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2456 2457 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2458 cp = &sc->pciide_channels[channel]; 2459 if (pciide_chansetup(sc, channel, interface) == 0) 2460 continue; 2461 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 2462 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2463 &ctlsize, pciide_pci_intr); 2464 } else { 2465 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2466 channel, &cmdsize, &ctlsize); 2467 } 2468 if (cp->hw_ok == 0) 2469 continue; 2470 /* 2471 * Check to see if something appears to be there. 2472 */ 2473 failreason = NULL; 2474 pciide_map_compat_intr(pa, cp, channel, interface); 2475 if (cp->hw_ok == 0) 2476 continue; 2477 if (!wdcprobe(&cp->wdc_channel)) { 2478 failreason = "not responding; disabled or no drives?"; 2479 goto next; 2480 } 2481 /* 2482 * Now, make sure it's actually attributable to this PCI IDE 2483 * channel by trying to access the channel again while the 2484 * PCI IDE controller's I/O space is disabled. (If the 2485 * channel no longer appears to be there, it belongs to 2486 * this controller.) YUCK! 
2487 */ 2488 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 2489 PCI_COMMAND_STATUS_REG); 2490 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 2491 csr & ~PCI_COMMAND_IO_ENABLE); 2492 if (wdcprobe(&cp->wdc_channel)) 2493 failreason = "other hardware responding at addresses"; 2494 pci_conf_write(sc->sc_pc, sc->sc_tag, 2495 PCI_COMMAND_STATUS_REG, csr); 2496 next: 2497 if (failreason) { 2498 printf("%s: %s ignored (%s)\n", 2499 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2500 failreason); 2501 cp->hw_ok = 0; 2502 pciide_unmap_compat_intr(pa, cp, channel, interface); 2503 bus_space_unmap(cp->wdc_channel.cmd_iot, 2504 cp->wdc_channel.cmd_ioh, cmdsize); 2505 if (interface & PCIIDE_INTERFACE_PCI(channel)) 2506 bus_space_unmap(cp->wdc_channel.ctl_iot, 2507 cp->ctl_baseioh, ctlsize); 2508 else 2509 bus_space_unmap(cp->wdc_channel.ctl_iot, 2510 cp->wdc_channel.ctl_ioh, ctlsize); 2511 } 2512 if (cp->hw_ok) { 2513 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2514 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2515 wdcattach(&cp->wdc_channel); 2516 } 2517 } 2518 2519 if (sc->sc_dma_ok == 0) 2520 return; 2521 2522 /* Allocate DMA maps */ 2523 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2524 idedma_ctl = 0; 2525 cp = &sc->pciide_channels[channel]; 2526 for (drive = 0; drive < 2; drive++) { 2527 drvp = &cp->wdc_channel.ch_drive[drive]; 2528 /* If no drive, skip */ 2529 if ((drvp->drive_flags & DRIVE) == 0) 2530 continue; 2531 if ((drvp->drive_flags & DRIVE_DMA) == 0) 2532 continue; 2533 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 2534 /* Abort DMA setup */ 2535 printf("%s:%d:%d: cannot allocate DMA maps, " 2536 "using PIO transfers\n", 2537 sc->sc_wdcdev.sc_dev.dv_xname, 2538 channel, drive); 2539 drvp->drive_flags &= ~DRIVE_DMA; 2540 } 2541 printf("%s:%d:%d: using DMA data transfers\n", 2542 sc->sc_wdcdev.sc_dev.dv_xname, 2543 channel, drive); 2544 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2545 } 2546 if (idedma_ctl != 0) { 2547 /* Add software bits in status register */ 2548 PCIIDE_DMACTL_WRITE(sc, channel, idedma_ctl); 2549 } 2550 } 2551 } 2552 2553 void 2554 default_chip_unmap(struct pciide_softc *sc, int flags) 2555 { 2556 struct pciide_channel *cp; 2557 int channel; 2558 2559 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2560 cp = &sc->pciide_channels[channel]; 2561 pciide_unmap_chan(sc, cp, flags); 2562 pciide_chanfree(sc, channel); 2563 } 2564 2565 pciide_unmapreg_dma(sc); 2566 2567 if (sc->sc_cookie) 2568 free(sc->sc_cookie, M_DEVBUF, sc->sc_cookielen); 2569 } 2570 2571 void 2572 sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2573 { 2574 struct pciide_channel *cp; 2575 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2576 int channel; 2577 bus_size_t cmdsize, ctlsize; 2578 2579 if (interface == 0) { 2580 WDCDEBUG_PRINT(("sata_chip_map interface == 0\n"), 2581 DEBUG_PROBE); 2582 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2583 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 2584 } 2585 2586 printf(": DMA"); 2587 pciide_mapreg_dma(sc, pa); 2588 printf("\n"); 2589 2590 if (sc->sc_dma_ok) { 2591 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2592 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2593 sc->sc_wdcdev.irqack = pciide_irqack; 2594 } 2595 sc->sc_wdcdev.PIO_cap = 4; 2596 sc->sc_wdcdev.DMA_cap = 2; 2597 sc->sc_wdcdev.UDMA_cap = 6; 2598 2599 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2600 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2601 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | 
WDC_CAPABILITY_DATA32 | 2602 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2603 sc->sc_wdcdev.set_modes = sata_setup_channel; 2604 sc->chip_unmap = default_chip_unmap; 2605 2606 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2607 cp = &sc->pciide_channels[channel]; 2608 if (pciide_chansetup(sc, channel, interface) == 0) 2609 continue; 2610 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2611 pciide_pci_intr); 2612 sata_setup_channel(&cp->wdc_channel); 2613 } 2614 } 2615 2616 void 2617 sata_setup_channel(struct channel_softc *chp) 2618 { 2619 struct ata_drive_datas *drvp; 2620 int drive; 2621 u_int32_t idedma_ctl; 2622 struct pciide_channel *cp = (struct pciide_channel *)chp; 2623 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2624 2625 /* setup DMA if needed */ 2626 pciide_channel_dma_setup(cp); 2627 2628 idedma_ctl = 0; 2629 2630 for (drive = 0; drive < 2; drive++) { 2631 drvp = &chp->ch_drive[drive]; 2632 /* If no drive, skip */ 2633 if ((drvp->drive_flags & DRIVE) == 0) 2634 continue; 2635 if (drvp->drive_flags & DRIVE_UDMA) { 2636 /* use Ultra/DMA */ 2637 drvp->drive_flags &= ~DRIVE_DMA; 2638 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2639 } else if (drvp->drive_flags & DRIVE_DMA) { 2640 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2641 } 2642 } 2643 2644 /* 2645 * Nothing to do to setup modes; it is meaningless in S-ATA 2646 * (but many S-ATA drives still want to get the SET_FEATURE 2647 * command). 2648 */ 2649 if (idedma_ctl != 0) { 2650 /* Add software bits in status register */ 2651 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 2652 } 2653 pciide_print_modes(cp); 2654 } 2655 2656 void 2657 piix_timing_debug(struct pciide_softc *sc) 2658 { 2659 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 2660 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 2661 DEBUG_PROBE); 2662 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE && 2663 sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_ISA) { 2664 WDCDEBUG_PRINT((", sidetim=0x%x", 2665 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 2666 DEBUG_PROBE); 2667 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2668 WDCDEBUG_PRINT((", udmareg 0x%x", 2669 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 2670 DEBUG_PROBE); 2671 } 2672 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2673 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2674 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2675 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2676 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2677 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2678 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2679 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2680 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2681 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2682 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2683 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2684 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2685 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2686 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2687 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2688 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 2689 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 2690 DEBUG_PROBE); 2691 } 2692 } 2693 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 2694 } 2695 2696 void 2697 piix_chip_map(struct pciide_softc *sc, struct 
pci_attach_args *pa) 2698 { 2699 struct pciide_channel *cp; 2700 int channel; 2701 u_int32_t idetim; 2702 bus_size_t cmdsize, ctlsize; 2703 2704 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2705 2706 printf(": DMA"); 2707 pciide_mapreg_dma(sc, pa); 2708 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2709 WDC_CAPABILITY_MODE; 2710 if (sc->sc_dma_ok) { 2711 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2712 sc->sc_wdcdev.irqack = pciide_irqack; 2713 switch (sc->sc_pp->ide_product) { 2714 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2715 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2716 case PCI_PRODUCT_INTEL_82371AB_IDE: 2717 case PCI_PRODUCT_INTEL_82372FB_IDE: 2718 case PCI_PRODUCT_INTEL_82440MX_IDE: 2719 case PCI_PRODUCT_INTEL_82451NX: 2720 case PCI_PRODUCT_INTEL_82801AA_IDE: 2721 case PCI_PRODUCT_INTEL_82801AB_IDE: 2722 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2723 case PCI_PRODUCT_INTEL_82801BA_IDE: 2724 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2725 case PCI_PRODUCT_INTEL_82801CA_IDE: 2726 case PCI_PRODUCT_INTEL_82801DB_IDE: 2727 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2728 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2729 case PCI_PRODUCT_INTEL_82801EB_IDE: 2730 case PCI_PRODUCT_INTEL_82801FB_IDE: 2731 case PCI_PRODUCT_INTEL_82801GB_IDE: 2732 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2733 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2734 break; 2735 } 2736 } 2737 sc->sc_wdcdev.PIO_cap = 4; 2738 sc->sc_wdcdev.DMA_cap = 2; 2739 switch (sc->sc_pp->ide_product) { 2740 case PCI_PRODUCT_INTEL_82801AA_IDE: 2741 case PCI_PRODUCT_INTEL_82372FB_IDE: 2742 sc->sc_wdcdev.UDMA_cap = 4; 2743 break; 2744 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2745 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2746 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2747 case PCI_PRODUCT_INTEL_82801BA_IDE: 2748 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2749 case PCI_PRODUCT_INTEL_82801CA_IDE: 2750 case PCI_PRODUCT_INTEL_82801DB_IDE: 2751 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2752 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2753 case PCI_PRODUCT_INTEL_82801EB_IDE: 2754 case PCI_PRODUCT_INTEL_82801FB_IDE: 2755 case PCI_PRODUCT_INTEL_82801GB_IDE: 2756 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2757 sc->sc_wdcdev.UDMA_cap = 5; 2758 break; 2759 default: 2760 sc->sc_wdcdev.UDMA_cap = 2; 2761 break; 2762 } 2763 2764 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE || 2765 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_ISA) { 2766 sc->sc_wdcdev.set_modes = piix_setup_channel; 2767 } else { 2768 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 2769 } 2770 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2771 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2772 2773 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2774 2775 piix_timing_debug(sc); 2776 2777 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2778 cp = &sc->pciide_channels[channel]; 2779 2780 if (pciide_chansetup(sc, channel, interface) == 0) 2781 continue; 2782 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2783 if ((PIIX_IDETIM_READ(idetim, channel) & 2784 PIIX_IDETIM_IDE) == 0) { 2785 printf("%s: %s ignored (disabled)\n", 2786 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2787 cp->hw_ok = 0; 2788 continue; 2789 } 2790 pciide_map_compat_intr(pa, cp, channel, interface); 2791 if (cp->hw_ok == 0) 2792 continue; 2793 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2794 pciide_pci_intr); 2795 if (cp->hw_ok == 0) 2796 goto next; 2797 if (pciide_chan_candisable(cp)) { 2798 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 2799 channel); 2800 
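/* no drives were found on this channel: clear its IDE decode enable bit and write the updated timing register back */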
pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 2801 idetim); 2802 } 2803 if (cp->hw_ok == 0) 2804 goto next; 2805 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2806 next: 2807 if (cp->hw_ok == 0) 2808 pciide_unmap_compat_intr(pa, cp, channel, interface); 2809 } 2810 2811 piix_timing_debug(sc); 2812 } 2813 2814 void 2815 piixsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2816 { 2817 struct pciide_channel *cp; 2818 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2819 int channel; 2820 bus_size_t cmdsize, ctlsize; 2821 u_int8_t reg, ich = 0; 2822 2823 printf(": DMA"); 2824 pciide_mapreg_dma(sc, pa); 2825 2826 if (sc->sc_dma_ok) { 2827 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2828 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2829 sc->sc_wdcdev.irqack = pciide_irqack; 2830 sc->sc_wdcdev.DMA_cap = 2; 2831 sc->sc_wdcdev.UDMA_cap = 6; 2832 } 2833 sc->sc_wdcdev.PIO_cap = 4; 2834 2835 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2836 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2837 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2838 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2839 sc->sc_wdcdev.set_modes = sata_setup_channel; 2840 2841 switch(sc->sc_pp->ide_product) { 2842 case PCI_PRODUCT_INTEL_6300ESB_SATA: 2843 case PCI_PRODUCT_INTEL_6300ESB_SATA2: 2844 case PCI_PRODUCT_INTEL_82801EB_SATA: 2845 case PCI_PRODUCT_INTEL_82801ER_SATA: 2846 ich = 5; 2847 break; 2848 case PCI_PRODUCT_INTEL_82801FB_SATA: 2849 case PCI_PRODUCT_INTEL_82801FR_SATA: 2850 case PCI_PRODUCT_INTEL_82801FBM_SATA: 2851 ich = 6; 2852 break; 2853 default: 2854 ich = 7; 2855 break; 2856 } 2857 2858 /* 2859 * Put the SATA portion of controllers that don't operate in combined 2860 * mode into native PCI modes so the maximum number of devices can be 2861 * used. 
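(In native-PCI mode the channels are addressed through the controller's PCI BARs and interrupt line rather than the legacy compatibility ports.)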
Intel calls this "enhanced mode" 2862 */ 2863 if (ich == 5) { 2864 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP); 2865 if ((reg & ICH5_SATA_MAP_COMBINED) == 0) { 2866 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2867 ICH5_SATA_PI); 2868 reg |= ICH5_SATA_PI_PRI_NATIVE | 2869 ICH5_SATA_PI_SEC_NATIVE; 2870 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2871 ICH5_SATA_PI, reg); 2872 interface |= PCIIDE_INTERFACE_PCI(0) | 2873 PCIIDE_INTERFACE_PCI(1); 2874 } 2875 } else { 2876 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP) & 2877 ICH6_SATA_MAP_CMB_MASK; 2878 if (reg != ICH6_SATA_MAP_CMB_PRI && 2879 reg != ICH6_SATA_MAP_CMB_SEC) { 2880 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2881 ICH5_SATA_PI); 2882 reg |= ICH5_SATA_PI_PRI_NATIVE | 2883 ICH5_SATA_PI_SEC_NATIVE; 2884 2885 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2886 ICH5_SATA_PI, reg); 2887 interface |= PCIIDE_INTERFACE_PCI(0) | 2888 PCIIDE_INTERFACE_PCI(1); 2889 2890 /* 2891 * Ask for SATA IDE Mode, we don't need to do this 2892 * for the combined mode case as combined mode is 2893 * only allowed in IDE Mode 2894 */ 2895 if (ich >= 7) { 2896 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 2897 ICH5_SATA_MAP) & ~ICH7_SATA_MAP_SMS_MASK; 2898 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2899 ICH5_SATA_MAP, reg); 2900 } 2901 } 2902 } 2903 2904 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2905 2906 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2907 cp = &sc->pciide_channels[channel]; 2908 if (pciide_chansetup(sc, channel, interface) == 0) 2909 continue; 2910 2911 pciide_map_compat_intr(pa, cp, channel, interface); 2912 if (cp->hw_ok == 0) 2913 continue; 2914 2915 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2916 pciide_pci_intr); 2917 if (cp->hw_ok != 0) 2918 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2919 2920 if (cp->hw_ok == 0) 2921 pciide_unmap_compat_intr(pa, cp, channel, interface); 2922 } 2923 } 2924 2925 void 2926 piix_setup_channel(struct channel_softc *chp) 2927 { 2928 u_int8_t mode[2], drive; 2929 u_int32_t oidetim, idetim, idedma_ctl; 2930 struct pciide_channel *cp = (struct pciide_channel *)chp; 2931 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2932 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 2933 2934 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2935 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 2936 idedma_ctl = 0; 2937 2938 /* set up new idetim: Enable IDE registers decode */ 2939 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 2940 chp->channel); 2941 2942 /* setup DMA */ 2943 pciide_channel_dma_setup(cp); 2944 2945 /* 2946 * Here we have to mess up with drives mode: PIIX can't have 2947 * different timings for master and slave drives. 2948 * We need to find the best combination. 
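 * The code below does that as follows: if both drives use DMA, both get
 * the lower of the two DMA modes (e.g. a mode-2 master paired with a
 * mode-1 slave ends up with both at DMA mode 1); if only one drive does
 * DMA, it keeps its DMA mode and the other falls back to PIO mode 0 when
 * the ISP/RTC timings clash; otherwise the slower of the two PIO modes is
 * used, except that a drive below PIO mode 2 is simply dropped to PIO 0
 * while the other keeps its own mode.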
2949 */ 2950 2951 /* If both drives supports DMA, take the lower mode */ 2952 if ((drvp[0].drive_flags & DRIVE_DMA) && 2953 (drvp[1].drive_flags & DRIVE_DMA)) { 2954 mode[0] = mode[1] = 2955 min(drvp[0].DMA_mode, drvp[1].DMA_mode); 2956 drvp[0].DMA_mode = mode[0]; 2957 drvp[1].DMA_mode = mode[1]; 2958 goto ok; 2959 } 2960 /* 2961 * If only one drive supports DMA, use its mode, and 2962 * put the other one in PIO mode 0 if mode not compatible 2963 */ 2964 if (drvp[0].drive_flags & DRIVE_DMA) { 2965 mode[0] = drvp[0].DMA_mode; 2966 mode[1] = drvp[1].PIO_mode; 2967 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] || 2968 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]]) 2969 mode[1] = drvp[1].PIO_mode = 0; 2970 goto ok; 2971 } 2972 if (drvp[1].drive_flags & DRIVE_DMA) { 2973 mode[1] = drvp[1].DMA_mode; 2974 mode[0] = drvp[0].PIO_mode; 2975 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] || 2976 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]]) 2977 mode[0] = drvp[0].PIO_mode = 0; 2978 goto ok; 2979 } 2980 /* 2981 * If both drives are not DMA, takes the lower mode, unless 2982 * one of them is PIO mode < 2 2983 */ 2984 if (drvp[0].PIO_mode < 2) { 2985 mode[0] = drvp[0].PIO_mode = 0; 2986 mode[1] = drvp[1].PIO_mode; 2987 } else if (drvp[1].PIO_mode < 2) { 2988 mode[1] = drvp[1].PIO_mode = 0; 2989 mode[0] = drvp[0].PIO_mode; 2990 } else { 2991 mode[0] = mode[1] = 2992 min(drvp[1].PIO_mode, drvp[0].PIO_mode); 2993 drvp[0].PIO_mode = mode[0]; 2994 drvp[1].PIO_mode = mode[1]; 2995 } 2996 ok: /* The modes are setup */ 2997 for (drive = 0; drive < 2; drive++) { 2998 if (drvp[drive].drive_flags & DRIVE_DMA) { 2999 idetim |= piix_setup_idetim_timings( 3000 mode[drive], 1, chp->channel); 3001 goto end; 3002 } 3003 } 3004 /* If we are there, none of the drives are DMA */ 3005 if (mode[0] >= 2) 3006 idetim |= piix_setup_idetim_timings( 3007 mode[0], 0, chp->channel); 3008 else 3009 idetim |= piix_setup_idetim_timings( 3010 mode[1], 0, chp->channel); 3011 end: /* 3012 * timing mode is now set up in the controller. 
Enable 3013 * it per-drive 3014 */ 3015 for (drive = 0; drive < 2; drive++) { 3016 /* If no drive, skip */ 3017 if ((drvp[drive].drive_flags & DRIVE) == 0) 3018 continue; 3019 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 3020 if (drvp[drive].drive_flags & DRIVE_DMA) 3021 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3022 } 3023 if (idedma_ctl != 0) { 3024 /* Add software bits in status register */ 3025 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3026 IDEDMA_CTL(chp->channel), 3027 idedma_ctl); 3028 } 3029 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 3030 pciide_print_modes(cp); 3031 } 3032 3033 void 3034 piix3_4_setup_channel(struct channel_softc *chp) 3035 { 3036 struct ata_drive_datas *drvp; 3037 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 3038 struct pciide_channel *cp = (struct pciide_channel *)chp; 3039 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3040 int drive; 3041 int channel = chp->channel; 3042 3043 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 3044 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 3045 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 3046 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 3047 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 3048 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 3049 PIIX_SIDETIM_RTC_MASK(channel)); 3050 3051 idedma_ctl = 0; 3052 /* If channel disabled, no need to go further */ 3053 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 3054 return; 3055 /* set up new idetim: Enable IDE registers decode */ 3056 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 3057 3058 /* setup DMA if needed */ 3059 pciide_channel_dma_setup(cp); 3060 3061 for (drive = 0; drive < 2; drive++) { 3062 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 3063 PIIX_UDMATIM_SET(0x3, channel, drive)); 3064 drvp = &chp->ch_drive[drive]; 3065 /* If no drive, skip */ 3066 if ((drvp->drive_flags & DRIVE) == 0) 3067 continue; 3068 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3069 (drvp->drive_flags & DRIVE_UDMA) == 0)) 3070 goto pio; 3071 3072 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 3073 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 3074 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 3075 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 3076 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 3077 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 3078 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 3079 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 3080 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 3081 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 3082 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 3083 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 3084 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 3085 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 3086 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 3087 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 3088 ideconf |= PIIX_CONFIG_PINGPONG; 3089 } 3090 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 3091 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 3092 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 3093 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 3094 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 3095 
sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 3096 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 3097 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 3098 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 3099 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 3100 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 3101 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 3102 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) { 3103 /* setup Ultra/100 */ 3104 if (drvp->UDMA_mode > 2 && 3105 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 3106 drvp->UDMA_mode = 2; 3107 if (drvp->UDMA_mode > 4) { 3108 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 3109 } else { 3110 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 3111 if (drvp->UDMA_mode > 2) { 3112 ideconf |= PIIX_CONFIG_UDMA66(channel, 3113 drive); 3114 } else { 3115 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 3116 drive); 3117 } 3118 } 3119 } 3120 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 3121 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 3122 /* setup Ultra/66 */ 3123 if (drvp->UDMA_mode > 2 && 3124 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 3125 drvp->UDMA_mode = 2; 3126 if (drvp->UDMA_mode > 2) 3127 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 3128 else 3129 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 3130 } 3131 3132 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3133 (drvp->drive_flags & DRIVE_UDMA)) { 3134 /* use Ultra/DMA */ 3135 drvp->drive_flags &= ~DRIVE_DMA; 3136 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 3137 udmareg |= PIIX_UDMATIM_SET( 3138 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 3139 } else { 3140 /* use Multiword DMA */ 3141 drvp->drive_flags &= ~DRIVE_UDMA; 3142 if (drive == 0) { 3143 idetim |= piix_setup_idetim_timings( 3144 drvp->DMA_mode, 1, channel); 3145 } else { 3146 sidetim |= piix_setup_sidetim_timings( 3147 drvp->DMA_mode, 1, channel); 3148 idetim = PIIX_IDETIM_SET(idetim, 3149 PIIX_IDETIM_SITRE, channel); 3150 } 3151 } 3152 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3153 3154 pio: /* use PIO mode */ 3155 idetim |= piix_setup_idetim_drvs(drvp); 3156 if (drive == 0) { 3157 idetim |= piix_setup_idetim_timings( 3158 drvp->PIO_mode, 0, channel); 3159 } else { 3160 sidetim |= piix_setup_sidetim_timings( 3161 drvp->PIO_mode, 0, channel); 3162 idetim = PIIX_IDETIM_SET(idetim, 3163 PIIX_IDETIM_SITRE, channel); 3164 } 3165 } 3166 if (idedma_ctl != 0) { 3167 /* Add software bits in status register */ 3168 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3169 IDEDMA_CTL(channel), 3170 idedma_ctl); 3171 } 3172 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 3173 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 3174 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 3175 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 3176 pciide_print_modes(cp); 3177 } 3178 3179 3180 /* setup ISP and RTC fields, based on mode */ 3181 u_int32_t 3182 piix_setup_idetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 3183 { 3184 3185 if (dma) 3186 return (PIIX_IDETIM_SET(0, 3187 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 3188 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 3189 channel)); 3190 else 3191 return (PIIX_IDETIM_SET(0, 3192 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 3193 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 3194 channel)); 3195 } 3196 3197 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 3198 u_int32_t 3199 piix_setup_idetim_drvs(struct 
ata_drive_datas *drvp) 3200 { 3201 u_int32_t ret = 0; 3202 struct channel_softc *chp = drvp->chnl_softc; 3203 u_int8_t channel = chp->channel; 3204 u_int8_t drive = drvp->drive; 3205 3206 /* 3207 * If drive is using UDMA, timings setups are independant 3208 * So just check DMA and PIO here. 3209 */ 3210 if (drvp->drive_flags & DRIVE_DMA) { 3211 /* if mode = DMA mode 0, use compatible timings */ 3212 if ((drvp->drive_flags & DRIVE_DMA) && 3213 drvp->DMA_mode == 0) { 3214 drvp->PIO_mode = 0; 3215 return (ret); 3216 } 3217 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 3218 /* 3219 * PIO and DMA timings are the same, use fast timings for PIO 3220 * too, else use compat timings. 3221 */ 3222 if ((piix_isp_pio[drvp->PIO_mode] != 3223 piix_isp_dma[drvp->DMA_mode]) || 3224 (piix_rtc_pio[drvp->PIO_mode] != 3225 piix_rtc_dma[drvp->DMA_mode])) 3226 drvp->PIO_mode = 0; 3227 /* if PIO mode <= 2, use compat timings for PIO */ 3228 if (drvp->PIO_mode <= 2) { 3229 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 3230 channel); 3231 return (ret); 3232 } 3233 } 3234 3235 /* 3236 * Now setup PIO modes. If mode < 2, use compat timings. 3237 * Else enable fast timings. Enable IORDY and prefetch/post 3238 * if PIO mode >= 3. 3239 */ 3240 3241 if (drvp->PIO_mode < 2) 3242 return (ret); 3243 3244 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 3245 if (drvp->PIO_mode >= 3) { 3246 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 3247 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 3248 } 3249 return (ret); 3250 } 3251 3252 /* setup values in SIDETIM registers, based on mode */ 3253 u_int32_t 3254 piix_setup_sidetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 3255 { 3256 if (dma) 3257 return (PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 3258 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel)); 3259 else 3260 return (PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 3261 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel)); 3262 } 3263 3264 void 3265 amd756_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3266 { 3267 struct pciide_channel *cp; 3268 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3269 int channel; 3270 pcireg_t chanenable; 3271 bus_size_t cmdsize, ctlsize; 3272 3273 printf(": DMA"); 3274 pciide_mapreg_dma(sc, pa); 3275 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3276 WDC_CAPABILITY_MODE; 3277 if (sc->sc_dma_ok) { 3278 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3279 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3280 sc->sc_wdcdev.irqack = pciide_irqack; 3281 } 3282 sc->sc_wdcdev.PIO_cap = 4; 3283 sc->sc_wdcdev.DMA_cap = 2; 3284 switch (sc->sc_pp->ide_product) { 3285 case PCI_PRODUCT_AMD_8111_IDE: 3286 sc->sc_wdcdev.UDMA_cap = 6; 3287 break; 3288 case PCI_PRODUCT_AMD_766_IDE: 3289 case PCI_PRODUCT_AMD_PBC768_IDE: 3290 sc->sc_wdcdev.UDMA_cap = 5; 3291 break; 3292 default: 3293 sc->sc_wdcdev.UDMA_cap = 4; 3294 break; 3295 } 3296 sc->sc_wdcdev.set_modes = amd756_setup_channel; 3297 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3298 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3299 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 3300 3301 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3302 3303 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3304 cp = &sc->pciide_channels[channel]; 3305 if (pciide_chansetup(sc, channel, interface) == 0) 3306 continue; 3307 3308 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 3309 
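/* this channel is disabled in AMD756_CHANSTATUS_EN: report it and skip it */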
printf("%s: %s ignored (disabled)\n", 3310 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3311 cp->hw_ok = 0; 3312 continue; 3313 } 3314 pciide_map_compat_intr(pa, cp, channel, interface); 3315 if (cp->hw_ok == 0) 3316 continue; 3317 3318 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3319 pciide_pci_intr); 3320 3321 if (pciide_chan_candisable(cp)) { 3322 chanenable &= ~AMD756_CHAN_EN(channel); 3323 } 3324 if (cp->hw_ok == 0) { 3325 pciide_unmap_compat_intr(pa, cp, channel, interface); 3326 continue; 3327 } 3328 3329 amd756_setup_channel(&cp->wdc_channel); 3330 } 3331 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 3332 chanenable); 3333 return; 3334 } 3335 3336 void 3337 amd756_setup_channel(struct channel_softc *chp) 3338 { 3339 u_int32_t udmatim_reg, datatim_reg; 3340 u_int8_t idedma_ctl; 3341 int mode, drive; 3342 struct ata_drive_datas *drvp; 3343 struct pciide_channel *cp = (struct pciide_channel *)chp; 3344 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3345 pcireg_t chanenable; 3346 #ifndef PCIIDE_AMD756_ENABLEDMA 3347 int product = sc->sc_pp->ide_product; 3348 int rev = sc->sc_rev; 3349 #endif 3350 3351 idedma_ctl = 0; 3352 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 3353 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 3354 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 3355 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 3356 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 3357 AMD756_CHANSTATUS_EN); 3358 3359 /* setup DMA if needed */ 3360 pciide_channel_dma_setup(cp); 3361 3362 for (drive = 0; drive < 2; drive++) { 3363 drvp = &chp->ch_drive[drive]; 3364 /* If no drive, skip */ 3365 if ((drvp->drive_flags & DRIVE) == 0) 3366 continue; 3367 /* add timing values, setup DMA if needed */ 3368 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3369 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3370 mode = drvp->PIO_mode; 3371 goto pio; 3372 } 3373 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3374 (drvp->drive_flags & DRIVE_UDMA)) { 3375 /* use Ultra/DMA */ 3376 drvp->drive_flags &= ~DRIVE_DMA; 3377 3378 /* Check cable */ 3379 if ((chanenable & AMD756_CABLE(chp->channel, 3380 drive)) == 0 && drvp->UDMA_mode > 2) { 3381 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 3382 "cable not detected\n", drvp->drive_name, 3383 sc->sc_wdcdev.sc_dev.dv_xname, 3384 chp->channel, drive), DEBUG_PROBE); 3385 drvp->UDMA_mode = 2; 3386 } 3387 3388 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 3389 AMD756_UDMA_EN_MTH(chp->channel, drive) | 3390 AMD756_UDMA_TIME(chp->channel, drive, 3391 amd756_udma_tim[drvp->UDMA_mode]); 3392 /* can use PIO timings, MW DMA unused */ 3393 mode = drvp->PIO_mode; 3394 } else { 3395 /* use Multiword DMA, but only if revision is OK */ 3396 drvp->drive_flags &= ~DRIVE_UDMA; 3397 #ifndef PCIIDE_AMD756_ENABLEDMA 3398 /* 3399 * The workaround doesn't seem to be necessary 3400 * with all drives, so it can be disabled by 3401 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 3402 * triggered. 
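 * AMD756_CHIPREV_DISABLEDMA(product, rev) below decides, from the PCI
 * product and revision IDs, whether this particular chip needs it.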
3403 */ 3404 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 3405 printf("%s:%d:%d: multi-word DMA disabled due " 3406 "to chip revision\n", 3407 sc->sc_wdcdev.sc_dev.dv_xname, 3408 chp->channel, drive); 3409 mode = drvp->PIO_mode; 3410 drvp->drive_flags &= ~DRIVE_DMA; 3411 goto pio; 3412 } 3413 #endif 3414 /* mode = min(pio, dma+2) */ 3415 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3416 mode = drvp->PIO_mode; 3417 else 3418 mode = drvp->DMA_mode + 2; 3419 } 3420 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3421 3422 pio: /* setup PIO mode */ 3423 if (mode <= 2) { 3424 drvp->DMA_mode = 0; 3425 drvp->PIO_mode = 0; 3426 mode = 0; 3427 } else { 3428 drvp->PIO_mode = mode; 3429 drvp->DMA_mode = mode - 2; 3430 } 3431 datatim_reg |= 3432 AMD756_DATATIM_PULSE(chp->channel, drive, 3433 amd756_pio_set[mode]) | 3434 AMD756_DATATIM_RECOV(chp->channel, drive, 3435 amd756_pio_rec[mode]); 3436 } 3437 if (idedma_ctl != 0) { 3438 /* Add software bits in status register */ 3439 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3440 IDEDMA_CTL(chp->channel), 3441 idedma_ctl); 3442 } 3443 pciide_print_modes(cp); 3444 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 3445 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 3446 } 3447 3448 void 3449 apollo_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3450 { 3451 struct pciide_channel *cp; 3452 pcireg_t interface; 3453 int no_ideconf = 0, channel; 3454 u_int32_t ideconf; 3455 bus_size_t cmdsize, ctlsize; 3456 pcitag_t tag; 3457 pcireg_t id, class; 3458 3459 /* 3460 * Fake interface since VT6410 is claimed to be a ``RAID'' device. 3461 */ 3462 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3463 interface = PCI_INTERFACE(pa->pa_class); 3464 } else { 3465 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3466 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3467 } 3468 3469 switch (PCI_PRODUCT(pa->pa_id)) { 3470 case PCI_PRODUCT_VIATECH_VT6410: 3471 case PCI_PRODUCT_VIATECH_VT6415: 3472 no_ideconf = 1; 3473 /* FALLTHROUGH */ 3474 case PCI_PRODUCT_VIATECH_CX700_IDE: 3475 case PCI_PRODUCT_VIATECH_VX700_IDE: 3476 case PCI_PRODUCT_VIATECH_VX855_IDE: 3477 case PCI_PRODUCT_VIATECH_VX900_IDE: 3478 printf(": ATA133"); 3479 sc->sc_wdcdev.UDMA_cap = 6; 3480 break; 3481 default: 3482 /* 3483 * Determine the DMA capabilities by looking at the 3484 * ISA bridge. 3485 */ 3486 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 3487 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3488 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3489 3490 /* 3491 * XXX On the VT8237, the ISA bridge is on a different 3492 * device. 
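 * (When the device at function 0 of this slot is not a bridge and the
 * IDE function sits at device 15, the code below retries the lookup at
 * device 17 function 0.)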
3493 */ 3494 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE && 3495 pa->pa_device == 15) { 3496 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 17, 0); 3497 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3498 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3499 } 3500 3501 switch (PCI_PRODUCT(id)) { 3502 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 3503 if (PCI_REVISION(class) >= 0x02) { 3504 printf(": ATA33"); 3505 sc->sc_wdcdev.UDMA_cap = 2; 3506 } else { 3507 printf(": DMA"); 3508 sc->sc_wdcdev.UDMA_cap = 0; 3509 } 3510 break; 3511 case PCI_PRODUCT_VIATECH_VT82C596A: 3512 if (PCI_REVISION(class) >= 0x12) { 3513 printf(": ATA66"); 3514 sc->sc_wdcdev.UDMA_cap = 4; 3515 } else { 3516 printf(": ATA33"); 3517 sc->sc_wdcdev.UDMA_cap = 2; 3518 } 3519 break; 3520 3521 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 3522 if (PCI_REVISION(class) >= 0x40) { 3523 printf(": ATA100"); 3524 sc->sc_wdcdev.UDMA_cap = 5; 3525 } else { 3526 printf(": ATA66"); 3527 sc->sc_wdcdev.UDMA_cap = 4; 3528 } 3529 break; 3530 case PCI_PRODUCT_VIATECH_VT8231_ISA: 3531 case PCI_PRODUCT_VIATECH_VT8233_ISA: 3532 printf(": ATA100"); 3533 sc->sc_wdcdev.UDMA_cap = 5; 3534 break; 3535 case PCI_PRODUCT_VIATECH_VT8233A_ISA: 3536 case PCI_PRODUCT_VIATECH_VT8235_ISA: 3537 case PCI_PRODUCT_VIATECH_VT8237_ISA: 3538 printf(": ATA133"); 3539 sc->sc_wdcdev.UDMA_cap = 6; 3540 break; 3541 default: 3542 printf(": DMA"); 3543 sc->sc_wdcdev.UDMA_cap = 0; 3544 break; 3545 } 3546 break; 3547 } 3548 3549 pciide_mapreg_dma(sc, pa); 3550 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3551 WDC_CAPABILITY_MODE; 3552 if (sc->sc_dma_ok) { 3553 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3554 sc->sc_wdcdev.irqack = pciide_irqack; 3555 if (sc->sc_wdcdev.UDMA_cap > 0) 3556 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3557 } 3558 sc->sc_wdcdev.PIO_cap = 4; 3559 sc->sc_wdcdev.DMA_cap = 2; 3560 sc->sc_wdcdev.set_modes = apollo_setup_channel; 3561 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3562 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3563 3564 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3565 3566 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 3567 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3568 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 3569 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 3570 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3571 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 3572 DEBUG_PROBE); 3573 3574 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3575 cp = &sc->pciide_channels[channel]; 3576 if (pciide_chansetup(sc, channel, interface) == 0) 3577 continue; 3578 3579 if (no_ideconf == 0) { 3580 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, 3581 APO_IDECONF); 3582 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 3583 printf("%s: %s ignored (disabled)\n", 3584 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3585 cp->hw_ok = 0; 3586 continue; 3587 } 3588 } 3589 pciide_map_compat_intr(pa, cp, channel, interface); 3590 if (cp->hw_ok == 0) 3591 continue; 3592 3593 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3594 pciide_pci_intr); 3595 if (cp->hw_ok == 0) { 3596 goto next; 3597 } 3598 if (pciide_chan_candisable(cp)) { 3599 if (no_ideconf == 0) { 3600 ideconf &= ~APO_IDECONF_EN(channel); 3601 pci_conf_write(sc->sc_pc, sc->sc_tag, 3602 APO_IDECONF, ideconf); 3603 } 3604 } 3605 3606 if (cp->hw_ok == 0) 3607 goto next; 3608 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 3609 next: 3610 if (cp->hw_ok == 0) 3611 
pciide_unmap_compat_intr(pa, cp, channel, interface); 3612 } 3613 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3614 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3615 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 3616 } 3617 3618 void 3619 apollo_setup_channel(struct channel_softc *chp) 3620 { 3621 u_int32_t udmatim_reg, datatim_reg; 3622 u_int8_t idedma_ctl; 3623 int mode, drive; 3624 struct ata_drive_datas *drvp; 3625 struct pciide_channel *cp = (struct pciide_channel *)chp; 3626 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3627 3628 idedma_ctl = 0; 3629 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 3630 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 3631 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 3632 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 3633 3634 /* setup DMA if needed */ 3635 pciide_channel_dma_setup(cp); 3636 3637 /* 3638 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 3639 * downgrade to Ultra/33 if needed 3640 */ 3641 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3642 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 3643 /* both drives UDMA */ 3644 if (chp->ch_drive[0].UDMA_mode > 2 && 3645 chp->ch_drive[1].UDMA_mode <= 2) { 3646 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 3647 chp->ch_drive[0].UDMA_mode = 2; 3648 } else if (chp->ch_drive[1].UDMA_mode > 2 && 3649 chp->ch_drive[0].UDMA_mode <= 2) { 3650 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 3651 chp->ch_drive[1].UDMA_mode = 2; 3652 } 3653 } 3654 3655 for (drive = 0; drive < 2; drive++) { 3656 drvp = &chp->ch_drive[drive]; 3657 /* If no drive, skip */ 3658 if ((drvp->drive_flags & DRIVE) == 0) 3659 continue; 3660 /* add timing values, setup DMA if needed */ 3661 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3662 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3663 mode = drvp->PIO_mode; 3664 goto pio; 3665 } 3666 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3667 (drvp->drive_flags & DRIVE_UDMA)) { 3668 /* use Ultra/DMA */ 3669 drvp->drive_flags &= ~DRIVE_DMA; 3670 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 3671 APO_UDMA_EN_MTH(chp->channel, drive); 3672 if (sc->sc_wdcdev.UDMA_cap == 6) { 3673 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3674 drive, apollo_udma133_tim[drvp->UDMA_mode]); 3675 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 3676 /* 686b */ 3677 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3678 drive, apollo_udma100_tim[drvp->UDMA_mode]); 3679 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 3680 /* 596b or 686a */ 3681 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 3682 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3683 drive, apollo_udma66_tim[drvp->UDMA_mode]); 3684 } else { 3685 /* 596a or 586b */ 3686 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3687 drive, apollo_udma33_tim[drvp->UDMA_mode]); 3688 } 3689 /* can use PIO timings, MW DMA unused */ 3690 mode = drvp->PIO_mode; 3691 } else { 3692 /* use Multiword DMA */ 3693 drvp->drive_flags &= ~DRIVE_UDMA; 3694 /* mode = min(pio, dma+2) */ 3695 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3696 mode = drvp->PIO_mode; 3697 else 3698 mode = drvp->DMA_mode + 2; 3699 } 3700 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3701 3702 pio: /* setup PIO mode */ 3703 if (mode <= 2) { 3704 drvp->DMA_mode = 0; 3705 drvp->PIO_mode = 0; 3706 mode = 0; 3707 } else { 3708 drvp->PIO_mode = mode; 3709 drvp->DMA_mode = mode - 2; 3710 } 3711 datatim_reg |= 3712 APO_DATATIM_PULSE(chp->channel, drive, 3713 apollo_pio_set[mode]) | 3714 APO_DATATIM_RECOV(chp->channel, drive, 3715 
apollo_pio_rec[mode]); 3716 } 3717 if (idedma_ctl != 0) { 3718 /* Add software bits in status register */ 3719 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3720 IDEDMA_CTL(chp->channel), 3721 idedma_ctl); 3722 } 3723 pciide_print_modes(cp); 3724 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 3725 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 3726 } 3727 3728 void 3729 cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3730 int channel) 3731 { 3732 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3733 bus_size_t cmdsize, ctlsize; 3734 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 3735 pcireg_t interface; 3736 int one_channel; 3737 3738 /* 3739 * The 0648/0649 can be told to identify as a RAID controller. 3740 * In this case, we have to fake interface 3741 */ 3742 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3743 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3744 PCIIDE_INTERFACE_SETTABLE(1); 3745 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3746 CMD_CONF_DSA1) 3747 interface |= PCIIDE_INTERFACE_PCI(0) | 3748 PCIIDE_INTERFACE_PCI(1); 3749 } else { 3750 interface = PCI_INTERFACE(pa->pa_class); 3751 } 3752 3753 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3754 cp->name = PCIIDE_CHANNEL_NAME(channel); 3755 cp->wdc_channel.channel = channel; 3756 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3757 3758 /* 3759 * Older CMD64X doesn't have independant channels 3760 */ 3761 switch (sc->sc_pp->ide_product) { 3762 case PCI_PRODUCT_CMDTECH_649: 3763 one_channel = 0; 3764 break; 3765 default: 3766 one_channel = 1; 3767 break; 3768 } 3769 3770 if (channel > 0 && one_channel) { 3771 cp->wdc_channel.ch_queue = 3772 sc->pciide_channels[0].wdc_channel.ch_queue; 3773 } else { 3774 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 3775 } 3776 if (cp->wdc_channel.ch_queue == NULL) { 3777 printf( 3778 "%s: %s cannot allocate channel queue", 3779 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3780 return; 3781 } 3782 3783 /* 3784 * with a CMD PCI64x, if we get here, the first channel is enabled: 3785 * there's no way to disable the first channel without disabling 3786 * the whole device 3787 */ 3788 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 3789 printf("%s: %s ignored (disabled)\n", 3790 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3791 cp->hw_ok = 0; 3792 return; 3793 } 3794 cp->hw_ok = 1; 3795 pciide_map_compat_intr(pa, cp, channel, interface); 3796 if (cp->hw_ok == 0) 3797 return; 3798 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 3799 if (cp->hw_ok == 0) { 3800 pciide_unmap_compat_intr(pa, cp, channel, interface); 3801 return; 3802 } 3803 if (pciide_chan_candisable(cp)) { 3804 if (channel == 1) { 3805 ctrl &= ~CMD_CTRL_2PORT; 3806 pciide_pci_write(pa->pa_pc, pa->pa_tag, 3807 CMD_CTRL, ctrl); 3808 pciide_unmap_compat_intr(pa, cp, channel, interface); 3809 } 3810 } 3811 } 3812 3813 int 3814 cmd_pci_intr(void *arg) 3815 { 3816 struct pciide_softc *sc = arg; 3817 struct pciide_channel *cp; 3818 struct channel_softc *wdc_cp; 3819 int i, rv, crv; 3820 u_int32_t priirq, secirq; 3821 3822 rv = 0; 3823 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3824 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3825 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3826 cp = &sc->pciide_channels[i]; 3827 wdc_cp = &cp->wdc_channel; 3828 /* If a compat channel skip. 
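(Compat channels are serviced by the compatibility interrupt handler set up in pciide_map_compat_intr(), so the shared PCI interrupt must leave them alone.)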
*/ 3829 if (cp->compat) 3830 continue; 3831 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 3832 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 3833 crv = wdcintr(wdc_cp); 3834 if (crv == 0) { 3835 #if 0 3836 printf("%s:%d: bogus intr\n", 3837 sc->sc_wdcdev.sc_dev.dv_xname, i); 3838 #endif 3839 } else 3840 rv = 1; 3841 } 3842 } 3843 return (rv); 3844 } 3845 3846 void 3847 cmd_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3848 { 3849 int channel; 3850 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3851 3852 printf(": no DMA"); 3853 sc->sc_dma_ok = 0; 3854 3855 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3856 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3857 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3858 3859 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3860 3861 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3862 cmd_channel_map(pa, sc, channel); 3863 } 3864 } 3865 3866 void 3867 cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3868 { 3869 struct pciide_channel *cp; 3870 int channel; 3871 int rev = sc->sc_rev; 3872 pcireg_t interface; 3873 3874 /* 3875 * The 0648/0649 can be told to identify as a RAID controller. 3876 * In this case, we have to fake interface 3877 */ 3878 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3879 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3880 PCIIDE_INTERFACE_SETTABLE(1); 3881 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3882 CMD_CONF_DSA1) 3883 interface |= PCIIDE_INTERFACE_PCI(0) | 3884 PCIIDE_INTERFACE_PCI(1); 3885 } else { 3886 interface = PCI_INTERFACE(pa->pa_class); 3887 } 3888 3889 printf(": DMA"); 3890 pciide_mapreg_dma(sc, pa); 3891 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3892 WDC_CAPABILITY_MODE; 3893 if (sc->sc_dma_ok) { 3894 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3895 switch (sc->sc_pp->ide_product) { 3896 case PCI_PRODUCT_CMDTECH_649: 3897 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3898 sc->sc_wdcdev.UDMA_cap = 5; 3899 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3900 break; 3901 case PCI_PRODUCT_CMDTECH_648: 3902 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3903 sc->sc_wdcdev.UDMA_cap = 4; 3904 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3905 break; 3906 case PCI_PRODUCT_CMDTECH_646: 3907 if (rev >= CMD0646U2_REV) { 3908 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3909 sc->sc_wdcdev.UDMA_cap = 2; 3910 } else if (rev >= CMD0646U_REV) { 3911 /* 3912 * Linux's driver claims that the 646U is broken 3913 * with UDMA. 
Only enable it if we know what we're 3914 * doing 3915 */ 3916 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 3917 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3918 sc->sc_wdcdev.UDMA_cap = 2; 3919 #endif 3920 /* explicitly disable UDMA */ 3921 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3922 CMD_UDMATIM(0), 0); 3923 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3924 CMD_UDMATIM(1), 0); 3925 } 3926 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3927 break; 3928 default: 3929 sc->sc_wdcdev.irqack = pciide_irqack; 3930 } 3931 } 3932 3933 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3934 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3935 sc->sc_wdcdev.PIO_cap = 4; 3936 sc->sc_wdcdev.DMA_cap = 2; 3937 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 3938 3939 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3940 3941 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 3942 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3943 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3944 DEBUG_PROBE); 3945 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3946 cp = &sc->pciide_channels[channel]; 3947 cmd_channel_map(pa, sc, channel); 3948 if (cp->hw_ok == 0) 3949 continue; 3950 cmd0643_9_setup_channel(&cp->wdc_channel); 3951 } 3952 /* 3953 * note - this also makes sure we clear the irq disable and reset 3954 * bits 3955 */ 3956 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 3957 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 3958 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3959 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3960 DEBUG_PROBE); 3961 } 3962 3963 void 3964 cmd0643_9_setup_channel(struct channel_softc *chp) 3965 { 3966 struct ata_drive_datas *drvp; 3967 u_int8_t tim; 3968 u_int32_t idedma_ctl, udma_reg; 3969 int drive; 3970 struct pciide_channel *cp = (struct pciide_channel *)chp; 3971 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3972 3973 idedma_ctl = 0; 3974 /* setup DMA if needed */ 3975 pciide_channel_dma_setup(cp); 3976 3977 for (drive = 0; drive < 2; drive++) { 3978 drvp = &chp->ch_drive[drive]; 3979 /* If no drive, skip */ 3980 if ((drvp->drive_flags & DRIVE) == 0) 3981 continue; 3982 /* add timing values, setup DMA if needed */ 3983 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 3984 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 3985 if (drvp->drive_flags & DRIVE_UDMA) { 3986 /* UltraDMA on a 646U2, 0648 or 0649 */ 3987 drvp->drive_flags &= ~DRIVE_DMA; 3988 udma_reg = pciide_pci_read(sc->sc_pc, 3989 sc->sc_tag, CMD_UDMATIM(chp->channel)); 3990 if (drvp->UDMA_mode > 2 && 3991 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3992 CMD_BICSR) & 3993 CMD_BICSR_80(chp->channel)) == 0) { 3994 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 3995 "80-wire cable not detected\n", 3996 drvp->drive_name, 3997 sc->sc_wdcdev.sc_dev.dv_xname, 3998 chp->channel, drive), DEBUG_PROBE); 3999 drvp->UDMA_mode = 2; 4000 } 4001 if (drvp->UDMA_mode > 2) 4002 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 4003 else if (sc->sc_wdcdev.UDMA_cap > 2) 4004 udma_reg |= CMD_UDMATIM_UDMA33(drive); 4005 udma_reg |= CMD_UDMATIM_UDMA(drive); 4006 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 4007 CMD_UDMATIM_TIM_OFF(drive)); 4008 udma_reg |= 4009 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 4010 CMD_UDMATIM_TIM_OFF(drive)); 4011 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4012 CMD_UDMATIM(chp->channel), udma_reg); 4013 } else { 4014 /* 4015 * use Multiword DMA. 
4016 * Timings will be used for both PIO and DMA, 4017 * so adjust DMA mode if needed 4018 * if we have a 0646U2/8/9, turn off UDMA 4019 */ 4020 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 4021 udma_reg = pciide_pci_read(sc->sc_pc, 4022 sc->sc_tag, 4023 CMD_UDMATIM(chp->channel)); 4024 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 4025 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4026 CMD_UDMATIM(chp->channel), 4027 udma_reg); 4028 } 4029 if (drvp->PIO_mode >= 3 && 4030 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 4031 drvp->DMA_mode = drvp->PIO_mode - 2; 4032 } 4033 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 4034 } 4035 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4036 } 4037 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4038 CMD_DATA_TIM(chp->channel, drive), tim); 4039 } 4040 if (idedma_ctl != 0) { 4041 /* Add software bits in status register */ 4042 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4043 IDEDMA_CTL(chp->channel), 4044 idedma_ctl); 4045 } 4046 pciide_print_modes(cp); 4047 #ifdef __sparc64__ 4048 /* 4049 * The Ultra 5 has a tendency to hang during reboot. This is due 4050 * to the PCI0646U asserting a PCI interrupt line when the chip 4051 * registers claim that it is not. Performing a reset at this 4052 * point appears to eliminate the symptoms. It is likely the 4053 * real cause is still lurking somewhere in the code. 4054 */ 4055 wdcreset(chp, SILENT); 4056 #endif /* __sparc64__ */ 4057 } 4058 4059 void 4060 cmd646_9_irqack(struct channel_softc *chp) 4061 { 4062 u_int32_t priirq, secirq; 4063 struct pciide_channel *cp = (struct pciide_channel *)chp; 4064 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4065 4066 if (chp->channel == 0) { 4067 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 4068 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 4069 } else { 4070 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 4071 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 4072 } 4073 pciide_irqack(chp); 4074 } 4075 4076 void 4077 cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4078 { 4079 struct pciide_channel *cp; 4080 int channel; 4081 4082 printf("\n%s: bus-master DMA support present", 4083 sc->sc_wdcdev.sc_dev.dv_xname); 4084 pciide_mapreg_dma(sc, pa); 4085 printf("\n"); 4086 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4087 WDC_CAPABILITY_MODE; 4088 if (sc->sc_dma_ok) { 4089 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4090 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 4091 sc->sc_wdcdev.UDMA_cap = 6; 4092 sc->sc_wdcdev.irqack = pciide_irqack; 4093 } 4094 4095 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4096 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4097 sc->sc_wdcdev.PIO_cap = 4; 4098 sc->sc_wdcdev.DMA_cap = 2; 4099 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 4100 4101 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 4102 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 4103 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 4104 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 4105 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4106 cp = &sc->pciide_channels[channel]; 4107 cmd680_channel_map(pa, sc, channel); 4108 if (cp->hw_ok == 0) 4109 continue; 4110 cmd680_setup_channel(&cp->wdc_channel); 4111 } 4112 } 4113 4114 void 4115 cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 4116 int channel) 4117 { 4118 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4119 bus_size_t cmdsize, ctlsize; 4120 int 
interface, i, reg; 4121 static const u_int8_t init_val[] = 4122 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 4123 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 4124 4125 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 4126 interface = PCIIDE_INTERFACE_SETTABLE(0) | 4127 PCIIDE_INTERFACE_SETTABLE(1); 4128 interface |= PCIIDE_INTERFACE_PCI(0) | 4129 PCIIDE_INTERFACE_PCI(1); 4130 } else { 4131 interface = PCI_INTERFACE(pa->pa_class); 4132 } 4133 4134 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4135 cp->name = PCIIDE_CHANNEL_NAME(channel); 4136 cp->wdc_channel.channel = channel; 4137 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4138 4139 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4140 if (cp->wdc_channel.ch_queue == NULL) { 4141 printf("%s %s: " 4142 "cannot allocate channel queue", 4143 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4144 return; 4145 } 4146 4147 /* XXX */ 4148 reg = 0xa2 + channel * 16; 4149 for (i = 0; i < sizeof(init_val); i++) 4150 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 4151 4152 printf("%s: %s %s to %s mode\n", 4153 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 4154 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 4155 "configured" : "wired", 4156 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 4157 "native-PCI" : "compatibility"); 4158 4159 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 4160 if (cp->hw_ok == 0) 4161 return; 4162 pciide_map_compat_intr(pa, cp, channel, interface); 4163 } 4164 4165 void 4166 cmd680_setup_channel(struct channel_softc *chp) 4167 { 4168 struct ata_drive_datas *drvp; 4169 u_int8_t mode, off, scsc; 4170 u_int16_t val; 4171 u_int32_t idedma_ctl; 4172 int drive; 4173 struct pciide_channel *cp = (struct pciide_channel *)chp; 4174 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4175 pci_chipset_tag_t pc = sc->sc_pc; 4176 pcitag_t pa = sc->sc_tag; 4177 static const u_int8_t udma2_tbl[] = 4178 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 4179 static const u_int8_t udma_tbl[] = 4180 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 4181 static const u_int16_t dma_tbl[] = 4182 { 0x2208, 0x10c2, 0x10c1 }; 4183 static const u_int16_t pio_tbl[] = 4184 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 4185 4186 idedma_ctl = 0; 4187 pciide_channel_dma_setup(cp); 4188 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 4189 4190 for (drive = 0; drive < 2; drive++) { 4191 drvp = &chp->ch_drive[drive]; 4192 /* If no drive, skip */ 4193 if ((drvp->drive_flags & DRIVE) == 0) 4194 continue; 4195 mode &= ~(0x03 << (drive * 4)); 4196 if (drvp->drive_flags & DRIVE_UDMA) { 4197 drvp->drive_flags &= ~DRIVE_DMA; 4198 off = 0xa0 + chp->channel * 16; 4199 if (drvp->UDMA_mode > 2 && 4200 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 4201 drvp->UDMA_mode = 2; 4202 scsc = pciide_pci_read(pc, pa, 0x8a); 4203 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 4204 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 4205 scsc = pciide_pci_read(pc, pa, 0x8a); 4206 if ((scsc & 0x30) == 0) 4207 drvp->UDMA_mode = 5; 4208 } 4209 mode |= 0x03 << (drive * 4); 4210 off = 0xac + chp->channel * 16 + drive * 2; 4211 val = pciide_pci_read(pc, pa, off) & ~0x3f; 4212 if (scsc & 0x30) 4213 val |= udma2_tbl[drvp->UDMA_mode]; 4214 else 4215 val |= udma_tbl[drvp->UDMA_mode]; 4216 pciide_pci_write(pc, pa, off, val); 4217 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4218 } else if (drvp->drive_flags & DRIVE_DMA) { 4219 mode |= 0x02 << (drive * 4); 4220 off = 0xa8 + chp->channel * 16 + drive * 2; 4221 val = dma_tbl[drvp->DMA_mode]; 4222 
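/* 16-bit timing value, programmed as two byte-wide config writes */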
pciide_pci_write(pc, pa, off, val & 0xff);
4223 pciide_pci_write(pc, pa, off + 1, val >> 8);
4224 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4225 } else {
4226 mode |= 0x01 << (drive * 4);
4227 off = 0xa4 + chp->channel * 16 + drive * 2;
4228 val = pio_tbl[drvp->PIO_mode];
4229 pciide_pci_write(pc, pa, off, val & 0xff);
4230 pciide_pci_write(pc, pa, off + 1, val >> 8);
4231 }
4232 }
4233
4234 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
4235 if (idedma_ctl != 0) {
4236 /* Add software bits in status register */
4237 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4238 IDEDMA_CTL(chp->channel),
4239 idedma_ctl);
4240 }
4241 pciide_print_modes(cp);
4242 }
4243
4244 /*
4245 * When the Silicon Image 3112 retries a PCI memory read command,
4246 * it may retry it as a memory read multiple command under some
4247 * circumstances. This can totally confuse some PCI controllers,
4248 * so ensure that it will never do this by making sure that the
4249 * Read Threshold (FIFO Read Request Control) field of the FIFO
4250 * Valid Byte Count and Control registers for both channels (BA5
4251 * offset 0x40 and 0x44) are set to be at least as large as the
4252 * cacheline size register.
4253 */
4254 void
4255 sii_fixup_cacheline(struct pciide_softc *sc, struct pci_attach_args *pa)
4256 {
4257 pcireg_t cls, reg40, reg44;
4258
4259 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
4260 cls = (cls >> PCI_CACHELINE_SHIFT) & PCI_CACHELINE_MASK;
4261 cls *= 4;
4262 if (cls > 224) {
4263 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
4264 cls &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
4265 cls |= ((224/4) << PCI_CACHELINE_SHIFT);
4266 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, cls);
4267 cls = 224;
4268 }
4269 if (cls < 32)
4270 cls = 32;
4271 cls = (cls + 31) / 32;
4272 reg40 = ba5_read_4(sc, 0x40);
4273 reg44 = ba5_read_4(sc, 0x44);
4274 if ((reg40 & 0x7) < cls)
4275 ba5_write_4(sc, 0x40, (reg40 & ~0x07) | cls);
4276 if ((reg44 & 0x7) < cls)
4277 ba5_write_4(sc, 0x44, (reg44 & ~0x07) | cls);
4278 }
4279
4280 void
4281 sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
4282 {
4283 struct pciide_channel *cp;
4284 bus_size_t cmdsize, ctlsize;
4285 pcireg_t interface, scs_cmd, cfgctl;
4286 int channel;
4287 struct pciide_satalink *sl;
4288
4289 /* Allocate memory for private data */
4290 sc->sc_cookielen = sizeof(*sl);
4291 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO);
4292 sl = sc->sc_cookie;
4293
4294 sc->chip_unmap = default_chip_unmap;
4295
4296 #define SII3112_RESET_BITS \
4297 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \
4298 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \
4299 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET)
4300
4301 /*
4302 * Reset everything and then unblock all of the interrupts.
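* Writing scs_cmd back with only SCS_CMD_BA5_EN preserved then
* releases the reset bits asserted above.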
4303 */ 4304 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4305 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4306 scs_cmd | SII3112_RESET_BITS); 4307 delay(50 * 1000); 4308 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4309 scs_cmd & SCS_CMD_BA5_EN); 4310 delay(50 * 1000); 4311 4312 if (scs_cmd & SCS_CMD_BA5_EN) { 4313 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4314 PCI_MAPREG_TYPE_MEM | 4315 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4316 &sl->ba5_st, &sl->ba5_sh, 4317 NULL, NULL, 0) != 0) 4318 printf(": unable to map BA5 register space\n"); 4319 else 4320 sl->ba5_en = 1; 4321 } else { 4322 cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 4323 SII3112_PCI_CFGCTL); 4324 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL, 4325 cfgctl | CFGCTL_BA5INDEN); 4326 } 4327 4328 printf(": DMA"); 4329 pciide_mapreg_dma(sc, pa); 4330 printf("\n"); 4331 4332 /* 4333 * Rev. <= 0x01 of the 3112 have a bug that can cause data 4334 * corruption if DMA transfers cross an 8K boundary. This is 4335 * apparently hard to tickle, but we'll go ahead and play it 4336 * safe. 4337 */ 4338 if (sc->sc_rev <= 0x01) { 4339 sc->sc_dma_maxsegsz = 8192; 4340 sc->sc_dma_boundary = 8192; 4341 } 4342 4343 sii_fixup_cacheline(sc, pa); 4344 4345 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4346 sc->sc_wdcdev.PIO_cap = 4; 4347 if (sc->sc_dma_ok) { 4348 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4349 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4350 sc->sc_wdcdev.irqack = pciide_irqack; 4351 sc->sc_wdcdev.DMA_cap = 2; 4352 sc->sc_wdcdev.UDMA_cap = 6; 4353 } 4354 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4355 4356 /* We can use SControl and SStatus to probe for drives. */ 4357 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4358 4359 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4360 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4361 4362 /* 4363 * The 3112 either identifies itself as a RAID storage device 4364 * or a Misc storage device. Fake up the interface bits for 4365 * what our driver expects. 
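* (i.e. native-PCI mode on both channels, with bus-master DMA).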
4366 */ 4367 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4368 interface = PCI_INTERFACE(pa->pa_class); 4369 } else { 4370 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4371 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4372 } 4373 4374 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4375 cp = &sc->pciide_channels[channel]; 4376 if (pciide_chansetup(sc, channel, interface) == 0) 4377 continue; 4378 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4379 pciide_pci_intr); 4380 if (cp->hw_ok == 0) 4381 continue; 4382 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4383 } 4384 } 4385 4386 void 4387 sii3112_setup_channel(struct channel_softc *chp) 4388 { 4389 struct ata_drive_datas *drvp; 4390 int drive; 4391 u_int32_t idedma_ctl, dtm; 4392 struct pciide_channel *cp = (struct pciide_channel *)chp; 4393 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4394 4395 /* setup DMA if needed */ 4396 pciide_channel_dma_setup(cp); 4397 4398 idedma_ctl = 0; 4399 dtm = 0; 4400 4401 for (drive = 0; drive < 2; drive++) { 4402 drvp = &chp->ch_drive[drive]; 4403 /* If no drive, skip */ 4404 if ((drvp->drive_flags & DRIVE) == 0) 4405 continue; 4406 if (drvp->drive_flags & DRIVE_UDMA) { 4407 /* use Ultra/DMA */ 4408 drvp->drive_flags &= ~DRIVE_DMA; 4409 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4410 dtm |= DTM_IDEx_DMA; 4411 } else if (drvp->drive_flags & DRIVE_DMA) { 4412 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4413 dtm |= DTM_IDEx_DMA; 4414 } else { 4415 dtm |= DTM_IDEx_PIO; 4416 } 4417 } 4418 4419 /* 4420 * Nothing to do to setup modes; it is meaningless in S-ATA 4421 * (but many S-ATA drives still want to get the SET_FEATURE 4422 * command). 4423 */ 4424 if (idedma_ctl != 0) { 4425 /* Add software bits in status register */ 4426 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 4427 } 4428 BA5_WRITE_4(sc, chp->channel, ba5_IDE_DTM, dtm); 4429 pciide_print_modes(cp); 4430 } 4431 4432 void 4433 sii3112_drv_probe(struct channel_softc *chp) 4434 { 4435 struct pciide_channel *cp = (struct pciide_channel *)chp; 4436 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4437 uint32_t scontrol, sstatus; 4438 uint8_t scnt, sn, cl, ch; 4439 int s; 4440 4441 /* 4442 * The 3112 is a 2-port part, and only has one drive per channel 4443 * (each port emulates a master drive). 4444 * 4445 * The 3114 is similar, but has 4 channels. 4446 */ 4447 4448 /* 4449 * Request communication initialization sequence, any speed. 4450 * Performing this is the equivalent of an ATA Reset. 4451 */ 4452 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 4453 4454 /* 4455 * XXX We don't yet support SATA power management; disable all 4456 * power management state transitions. 4457 */ 4458 scontrol |= SControl_IPM_NONE; 4459 4460 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4461 delay(50 * 1000); 4462 scontrol &= ~SControl_DET_INIT; 4463 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4464 delay(50 * 1000); 4465 4466 sstatus = BA5_READ_4(sc, chp->channel, ba5_SStatus); 4467 #if 0 4468 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 4469 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 4470 BA5_READ_4(sc, chp->channel, ba5_SControl)); 4471 #endif 4472 switch (sstatus & SStatus_DET_mask) { 4473 case SStatus_DET_NODEV: 4474 /* No device; be silent. 
*/ 4475 break; 4476 4477 case SStatus_DET_DEV_NE: 4478 printf("%s: port %d: device connected, but " 4479 "communication not established\n", 4480 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4481 break; 4482 4483 case SStatus_DET_OFFLINE: 4484 printf("%s: port %d: PHY offline\n", 4485 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4486 break; 4487 4488 case SStatus_DET_DEV: 4489 /* 4490 * XXX ATAPI detection doesn't currently work. Don't 4491 * XXX know why. But, it's not like the standard method 4492 * XXX can detect an ATAPI device connected via a SATA/PATA 4493 * XXX bridge, so at least this is no worse. --thorpej 4494 */ 4495 if (chp->_vtbl != NULL) 4496 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 4497 else 4498 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 4499 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 4500 delay(10); /* 400ns delay */ 4501 /* Save register contents. */ 4502 if (chp->_vtbl != NULL) { 4503 scnt = CHP_READ_REG(chp, wdr_seccnt); 4504 sn = CHP_READ_REG(chp, wdr_sector); 4505 cl = CHP_READ_REG(chp, wdr_cyl_lo); 4506 ch = CHP_READ_REG(chp, wdr_cyl_hi); 4507 } else { 4508 scnt = bus_space_read_1(chp->cmd_iot, 4509 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 4510 sn = bus_space_read_1(chp->cmd_iot, 4511 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 4512 cl = bus_space_read_1(chp->cmd_iot, 4513 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 4514 ch = bus_space_read_1(chp->cmd_iot, 4515 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 4516 } 4517 #if 0 4518 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 4519 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 4520 scnt, sn, cl, ch); 4521 #endif 4522 /* 4523 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 4524 * cases we get wrong values here, so ignore it. 4525 */ 4526 s = splbio(); 4527 if (cl == 0x14 && ch == 0xeb) 4528 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 4529 else 4530 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 4531 splx(s); 4532 4533 printf("%s: port %d", 4534 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4535 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 4536 case 1: 4537 printf(": 1.5Gb/s"); 4538 break; 4539 case 2: 4540 printf(": 3.0Gb/s"); 4541 break; 4542 } 4543 printf("\n"); 4544 break; 4545 4546 default: 4547 printf("%s: port %d: unknown SStatus: 0x%08x\n", 4548 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 4549 } 4550 } 4551 4552 void 4553 sii3114_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4554 { 4555 struct pciide_channel *cp; 4556 pcireg_t scs_cmd; 4557 pci_intr_handle_t intrhandle; 4558 const char *intrstr; 4559 int channel; 4560 struct pciide_satalink *sl; 4561 4562 /* Allocate memory for private data */ 4563 sc->sc_cookielen = sizeof(*sl); 4564 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO); 4565 sl = sc->sc_cookie; 4566 4567 #define SII3114_RESET_BITS \ 4568 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4569 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4570 SCS_CMD_FF3_RESET | SCS_CMD_FF2_RESET | \ 4571 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET | \ 4572 SCS_CMD_IDE3_RESET | SCS_CMD_IDE2_RESET) 4573 4574 /* 4575 * Reset everything and then unblock all of the interrupts. 
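* For the 3114 only the M66EN bit is preserved when the resets
* are released.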
4576 */ 4577 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4578 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4579 scs_cmd | SII3114_RESET_BITS); 4580 delay(50 * 1000); 4581 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4582 scs_cmd & SCS_CMD_M66EN); 4583 delay(50 * 1000); 4584 4585 /* 4586 * On the 3114, the BA5 register space is always enabled. In 4587 * order to use the 3114 in any sane way, we must use this BA5 4588 * register space, and so we consider it an error if we cannot 4589 * map it. 4590 * 4591 * As a consequence of using BA5, our register mapping is different 4592 * from a normal PCI IDE controller's, and so we are unable to use 4593 * most of the common PCI IDE register mapping functions. 4594 */ 4595 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4596 PCI_MAPREG_TYPE_MEM | 4597 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4598 &sl->ba5_st, &sl->ba5_sh, 4599 NULL, NULL, 0) != 0) { 4600 printf(": unable to map BA5 register space\n"); 4601 return; 4602 } 4603 sl->ba5_en = 1; 4604 4605 /* 4606 * Set the Interrupt Steering bit in the IDEDMA_CMD register of 4607 * channel 2. This is required at all times for proper operation 4608 * when using the BA5 register space (otherwise interrupts from 4609 * all 4 channels won't work). 4610 */ 4611 BA5_WRITE_4(sc, 2, ba5_IDEDMA_CMD, IDEDMA_CMD_INT_STEER); 4612 4613 printf(": DMA"); 4614 sii3114_mapreg_dma(sc, pa); 4615 printf("\n"); 4616 4617 sii_fixup_cacheline(sc, pa); 4618 4619 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4620 sc->sc_wdcdev.PIO_cap = 4; 4621 if (sc->sc_dma_ok) { 4622 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4623 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4624 sc->sc_wdcdev.irqack = pciide_irqack; 4625 sc->sc_wdcdev.DMA_cap = 2; 4626 sc->sc_wdcdev.UDMA_cap = 6; 4627 } 4628 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4629 4630 /* We can use SControl and SStatus to probe for drives. */ 4631 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4632 4633 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4634 sc->sc_wdcdev.nchannels = 4; 4635 4636 /* Map and establish the interrupt handler. */ 4637 if (pci_intr_map(pa, &intrhandle) != 0) { 4638 printf("%s: couldn't map native-PCI interrupt\n", 4639 sc->sc_wdcdev.sc_dev.dv_xname); 4640 return; 4641 } 4642 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 4643 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 4644 /* XXX */ 4645 pciide_pci_intr, sc, 4646 sc->sc_wdcdev.sc_dev.dv_xname); 4647 if (sc->sc_pci_ih != NULL) { 4648 printf("%s: using %s for native-PCI interrupt\n", 4649 sc->sc_wdcdev.sc_dev.dv_xname, 4650 intrstr ? 
intrstr : "unknown interrupt"); 4651 } else { 4652 printf("%s: couldn't establish native-PCI interrupt", 4653 sc->sc_wdcdev.sc_dev.dv_xname); 4654 if (intrstr != NULL) 4655 printf(" at %s", intrstr); 4656 printf("\n"); 4657 return; 4658 } 4659 4660 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4661 cp = &sc->pciide_channels[channel]; 4662 if (sii3114_chansetup(sc, channel) == 0) 4663 continue; 4664 sii3114_mapchan(cp); 4665 if (cp->hw_ok == 0) 4666 continue; 4667 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4668 } 4669 } 4670 4671 void 4672 sii3114_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 4673 { 4674 int chan, reg; 4675 bus_size_t size; 4676 struct pciide_satalink *sl = sc->sc_cookie; 4677 4678 sc->sc_wdcdev.dma_arg = sc; 4679 sc->sc_wdcdev.dma_init = pciide_dma_init; 4680 sc->sc_wdcdev.dma_start = pciide_dma_start; 4681 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 4682 4683 /* 4684 * Slice off a subregion of BA5 for each of the channel's DMA 4685 * registers. 4686 */ 4687 4688 sc->sc_dma_iot = sl->ba5_st; 4689 for (chan = 0; chan < 4; chan++) { 4690 for (reg = 0; reg < IDEDMA_NREGS; reg++) { 4691 size = 4; 4692 if (size > (IDEDMA_SCH_OFFSET - reg)) 4693 size = IDEDMA_SCH_OFFSET - reg; 4694 if (bus_space_subregion(sl->ba5_st, 4695 sl->ba5_sh, 4696 satalink_ba5_regmap[chan].ba5_IDEDMA_CMD + reg, 4697 size, &sl->regs[chan].dma_iohs[reg]) != 0) { 4698 sc->sc_dma_ok = 0; 4699 printf(": can't subregion offset " 4700 "%lu size %lu", 4701 (u_long) satalink_ba5_regmap[ 4702 chan].ba5_IDEDMA_CMD + reg, 4703 (u_long) size); 4704 return; 4705 } 4706 } 4707 } 4708 4709 sc->sc_dmacmd_read = sii3114_dmacmd_read; 4710 sc->sc_dmacmd_write = sii3114_dmacmd_write; 4711 sc->sc_dmactl_read = sii3114_dmactl_read; 4712 sc->sc_dmactl_write = sii3114_dmactl_write; 4713 sc->sc_dmatbl_write = sii3114_dmatbl_write; 4714 4715 /* DMA registers all set up! */ 4716 sc->sc_dmat = pa->pa_dmat; 4717 sc->sc_dma_ok = 1; 4718 } 4719 4720 int 4721 sii3114_chansetup(struct pciide_softc *sc, int channel) 4722 { 4723 static const char *channel_names[] = { 4724 "port 0", 4725 "port 1", 4726 "port 2", 4727 "port 3", 4728 }; 4729 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4730 4731 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4732 4733 /* 4734 * We must always keep the Interrupt Steering bit set in channel 2's 4735 * IDEDMA_CMD register. 
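* (cp->idedma_cmd, set just below, is meant to be folded into later
* IDEDMA_CMD writes on this channel so the bit is not lost).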
4736 */ 4737 if (channel == 2) 4738 cp->idedma_cmd = IDEDMA_CMD_INT_STEER; 4739 4740 cp->name = channel_names[channel]; 4741 cp->wdc_channel.channel = channel; 4742 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4743 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4744 if (cp->wdc_channel.ch_queue == NULL) { 4745 printf("%s %s channel: " 4746 "cannot allocate channel queue", 4747 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4748 return (0); 4749 } 4750 return (1); 4751 } 4752 4753 void 4754 sii3114_mapchan(struct pciide_channel *cp) 4755 { 4756 struct channel_softc *wdc_cp = &cp->wdc_channel; 4757 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4758 struct pciide_satalink *sl = sc->sc_cookie; 4759 int chan = wdc_cp->channel; 4760 int i; 4761 4762 cp->hw_ok = 0; 4763 cp->compat = 0; 4764 cp->ih = sc->sc_pci_ih; 4765 4766 sl->regs[chan].cmd_iot = sl->ba5_st; 4767 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4768 satalink_ba5_regmap[chan].ba5_IDE_TF0, 4769 9, &sl->regs[chan].cmd_baseioh) != 0) { 4770 printf("%s: couldn't subregion %s cmd base\n", 4771 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4772 return; 4773 } 4774 4775 sl->regs[chan].ctl_iot = sl->ba5_st; 4776 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4777 satalink_ba5_regmap[chan].ba5_IDE_TF8, 4778 1, &cp->ctl_baseioh) != 0) { 4779 printf("%s: couldn't subregion %s ctl base\n", 4780 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4781 return; 4782 } 4783 sl->regs[chan].ctl_ioh = cp->ctl_baseioh; 4784 4785 for (i = 0; i < WDC_NREG; i++) { 4786 if (bus_space_subregion(sl->regs[chan].cmd_iot, 4787 sl->regs[chan].cmd_baseioh, 4788 i, i == 0 ? 4 : 1, 4789 &sl->regs[chan].cmd_iohs[i]) != 0) { 4790 printf("%s: couldn't subregion %s channel " 4791 "cmd regs\n", 4792 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4793 return; 4794 } 4795 } 4796 sl->regs[chan].cmd_iohs[wdr_status & _WDC_REGMASK] = 4797 sl->regs[chan].cmd_iohs[wdr_command & _WDC_REGMASK]; 4798 sl->regs[chan].cmd_iohs[wdr_features & _WDC_REGMASK] = 4799 sl->regs[chan].cmd_iohs[wdr_error & _WDC_REGMASK]; 4800 wdc_cp->data32iot = wdc_cp->cmd_iot = sl->regs[chan].cmd_iot; 4801 wdc_cp->data32ioh = wdc_cp->cmd_ioh = sl->regs[chan].cmd_iohs[0]; 4802 wdc_cp->_vtbl = &wdc_sii3114_vtbl; 4803 wdcattach(wdc_cp); 4804 cp->hw_ok = 1; 4805 } 4806 4807 u_int8_t 4808 sii3114_read_reg(struct channel_softc *chp, enum wdc_regs reg) 4809 { 4810 struct pciide_channel *cp = (struct pciide_channel *)chp; 4811 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4812 struct pciide_satalink *sl = sc->sc_cookie; 4813 4814 if (reg & _WDC_AUX) 4815 return (bus_space_read_1(sl->regs[chp->channel].ctl_iot, 4816 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 4817 else 4818 return (bus_space_read_1(sl->regs[chp->channel].cmd_iot, 4819 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0)); 4820 } 4821 4822 void 4823 sii3114_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 4824 { 4825 struct pciide_channel *cp = (struct pciide_channel *)chp; 4826 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4827 struct pciide_satalink *sl = sc->sc_cookie; 4828 4829 if (reg & _WDC_AUX) 4830 bus_space_write_1(sl->regs[chp->channel].ctl_iot, 4831 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 4832 else 4833 bus_space_write_1(sl->regs[chp->channel].cmd_iot, 4834 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 4835 0, val); 4836 } 4837 4838 u_int8_t 4839 sii3114_dmacmd_read(struct pciide_softc *sc, int chan) 4840 { 4841 struct pciide_satalink 
*sl = sc->sc_cookie; 4842 4843 return (bus_space_read_1(sc->sc_dma_iot, 4844 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0)); 4845 } 4846 4847 void 4848 sii3114_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 4849 { 4850 struct pciide_satalink *sl = sc->sc_cookie; 4851 4852 bus_space_write_1(sc->sc_dma_iot, 4853 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, val); 4854 } 4855 4856 u_int8_t 4857 sii3114_dmactl_read(struct pciide_softc *sc, int chan) 4858 { 4859 struct pciide_satalink *sl = sc->sc_cookie; 4860 4861 return (bus_space_read_1(sc->sc_dma_iot, 4862 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0)); 4863 } 4864 4865 void 4866 sii3114_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 4867 { 4868 struct pciide_satalink *sl = sc->sc_cookie; 4869 4870 bus_space_write_1(sc->sc_dma_iot, 4871 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0, val); 4872 } 4873 4874 void 4875 sii3114_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 4876 { 4877 struct pciide_satalink *sl = sc->sc_cookie; 4878 4879 bus_space_write_4(sc->sc_dma_iot, 4880 sl->regs[chan].dma_iohs[IDEDMA_TBL(0)], 0, val); 4881 } 4882 4883 void 4884 cy693_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4885 { 4886 struct pciide_channel *cp; 4887 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4888 bus_size_t cmdsize, ctlsize; 4889 struct pciide_cy *cy; 4890 4891 /* Allocate memory for private data */ 4892 sc->sc_cookielen = sizeof(*cy); 4893 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO); 4894 cy = sc->sc_cookie; 4895 4896 /* 4897 * this chip has 2 PCI IDE functions, one for primary and one for 4898 * secondary. So we need to call pciide_mapregs_compat() with 4899 * the real channel 4900 */ 4901 if (pa->pa_function == 1) { 4902 cy->cy_compatchan = 0; 4903 } else if (pa->pa_function == 2) { 4904 cy->cy_compatchan = 1; 4905 } else { 4906 printf(": unexpected PCI function %d\n", pa->pa_function); 4907 return; 4908 } 4909 4910 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 4911 printf(": DMA"); 4912 pciide_mapreg_dma(sc, pa); 4913 } else { 4914 printf(": no DMA"); 4915 sc->sc_dma_ok = 0; 4916 } 4917 4918 cy->cy_handle = cy82c693_init(pa->pa_iot); 4919 if (cy->cy_handle == NULL) { 4920 printf(", (unable to map ctl registers)"); 4921 sc->sc_dma_ok = 0; 4922 } 4923 4924 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4925 WDC_CAPABILITY_MODE; 4926 if (sc->sc_dma_ok) { 4927 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4928 sc->sc_wdcdev.irqack = pciide_irqack; 4929 } 4930 sc->sc_wdcdev.PIO_cap = 4; 4931 sc->sc_wdcdev.DMA_cap = 2; 4932 sc->sc_wdcdev.set_modes = cy693_setup_channel; 4933 4934 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4935 sc->sc_wdcdev.nchannels = 1; 4936 4937 /* Only one channel for this chip; if we are here it's enabled */ 4938 cp = &sc->pciide_channels[0]; 4939 sc->wdc_chanarray[0] = &cp->wdc_channel; 4940 cp->name = PCIIDE_CHANNEL_NAME(0); 4941 cp->wdc_channel.channel = 0; 4942 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4943 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4944 if (cp->wdc_channel.ch_queue == NULL) { 4945 printf(": cannot allocate channel queue\n"); 4946 return; 4947 } 4948 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 4949 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 
4950 "configured" : "wired"); 4951 if (interface & PCIIDE_INTERFACE_PCI(0)) { 4952 printf("native-PCI\n"); 4953 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 4954 pciide_pci_intr); 4955 } else { 4956 printf("compatibility\n"); 4957 cp->hw_ok = pciide_mapregs_compat(pa, cp, cy->cy_compatchan, 4958 &cmdsize, &ctlsize); 4959 } 4960 4961 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4962 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4963 pciide_map_compat_intr(pa, cp, cy->cy_compatchan, interface); 4964 if (cp->hw_ok == 0) 4965 return; 4966 wdcattach(&cp->wdc_channel); 4967 if (pciide_chan_candisable(cp)) { 4968 pci_conf_write(sc->sc_pc, sc->sc_tag, 4969 PCI_COMMAND_STATUS_REG, 0); 4970 } 4971 if (cp->hw_ok == 0) { 4972 pciide_unmap_compat_intr(pa, cp, cy->cy_compatchan, 4973 interface); 4974 return; 4975 } 4976 4977 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 4978 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4979 cy693_setup_channel(&cp->wdc_channel); 4980 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 4981 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4982 } 4983 4984 void 4985 cy693_setup_channel(struct channel_softc *chp) 4986 { 4987 struct ata_drive_datas *drvp; 4988 int drive; 4989 u_int32_t cy_cmd_ctrl; 4990 u_int32_t idedma_ctl; 4991 struct pciide_channel *cp = (struct pciide_channel *)chp; 4992 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4993 int dma_mode = -1; 4994 struct pciide_cy *cy = sc->sc_cookie; 4995 4996 cy_cmd_ctrl = idedma_ctl = 0; 4997 4998 /* setup DMA if needed */ 4999 pciide_channel_dma_setup(cp); 5000 5001 for (drive = 0; drive < 2; drive++) { 5002 drvp = &chp->ch_drive[drive]; 5003 /* If no drive, skip */ 5004 if ((drvp->drive_flags & DRIVE) == 0) 5005 continue; 5006 /* add timing values, setup DMA if needed */ 5007 if (drvp->drive_flags & DRIVE_DMA) { 5008 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5009 /* use Multiword DMA */ 5010 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 5011 dma_mode = drvp->DMA_mode; 5012 } 5013 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 5014 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 5015 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 5016 CY_CMD_CTRL_IOW_REC_OFF(drive)); 5017 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 5018 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 5019 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 5020 CY_CMD_CTRL_IOR_REC_OFF(drive)); 5021 } 5022 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 5023 chp->ch_drive[0].DMA_mode = dma_mode; 5024 chp->ch_drive[1].DMA_mode = dma_mode; 5025 5026 if (dma_mode == -1) 5027 dma_mode = 0; 5028 5029 if (cy->cy_handle != NULL) { 5030 /* Note: `multiple' is implied. */ 5031 cy82c693_write(cy->cy_handle, 5032 (cy->cy_compatchan == 0) ? 
5033 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 5034 } 5035 5036 pciide_print_modes(cp); 5037 5038 if (idedma_ctl != 0) { 5039 /* Add software bits in status register */ 5040 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5041 IDEDMA_CTL(chp->channel), idedma_ctl); 5042 } 5043 } 5044 5045 static struct sis_hostbr_type { 5046 u_int16_t id; 5047 u_int8_t rev; 5048 u_int8_t udma_mode; 5049 char *name; 5050 u_int8_t type; 5051 #define SIS_TYPE_NOUDMA 0 5052 #define SIS_TYPE_66 1 5053 #define SIS_TYPE_100OLD 2 5054 #define SIS_TYPE_100NEW 3 5055 #define SIS_TYPE_133OLD 4 5056 #define SIS_TYPE_133NEW 5 5057 #define SIS_TYPE_SOUTH 6 5058 } sis_hostbr_type[] = { 5059 /* Most infos here are from sos@freebsd.org */ 5060 {PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66}, 5061 #if 0 5062 /* 5063 * controllers associated to a rev 0x2 530 Host to PCI Bridge 5064 * have problems with UDMA (info provided by Christos) 5065 */ 5066 {PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA}, 5067 #endif 5068 {PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66}, 5069 {PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66}, 5070 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66}, 5071 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66}, 5072 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW}, 5073 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW}, 5074 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW}, 5075 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH}, 5076 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH}, 5077 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH}, 5078 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH}, 5079 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH}, 5080 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH}, 5081 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH}, 5082 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH}, 5083 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH}, 5084 {PCI_PRODUCT_SIS_661, 0x00, 6, "661", SIS_TYPE_SOUTH}, 5085 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD}, 5086 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW}, 5087 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW}, 5088 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH}, 5089 {PCI_PRODUCT_SIS_741, 0x00, 6, "741", SIS_TYPE_SOUTH}, 5090 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW}, 5091 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH}, 5092 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH}, 5093 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH}, 5094 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH}, 5095 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH}, 5096 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH}, 5097 {PCI_PRODUCT_SIS_760, 0x00, 6, "760", SIS_TYPE_SOUTH}, 5098 /* 5099 * From sos@freebsd.org: the 0x961 ID will never be found in real world 5100 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW}, 5101 */ 5102 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW}, 5103 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW}, 5104 {PCI_PRODUCT_SIS_964, 0x00, 6, "964", SIS_TYPE_133NEW}, 5105 {PCI_PRODUCT_SIS_965, 0x00, 6, "965", SIS_TYPE_133NEW}, 5106 {PCI_PRODUCT_SIS_966, 0x00, 6, "966", SIS_TYPE_133NEW}, 5107 {PCI_PRODUCT_SIS_968, 0x00, 6, "968", SIS_TYPE_133NEW} 5108 }; 5109 5110 static struct sis_hostbr_type *sis_hostbr_type_match; 5111 5112 int 5113 sis_hostbr_match(struct pci_attach_args *pa) 5114 { 5115 int i; 5116 5117 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS) 5118 
return (0);
5119 sis_hostbr_type_match = NULL;
5120 for (i = 0;
5121 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
5122 i++) {
5123 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
5124 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
5125 sis_hostbr_type_match = &sis_hostbr_type[i];
5126 }
5127 return (sis_hostbr_type_match != NULL);
5128 }
5129
5130 int
5131 sis_south_match(struct pci_attach_args *pa)
5132 {
5133 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
5134 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
5135 PCI_REVISION(pa->pa_class) >= 0x10);
5136 }
5137
5138 void
5139 sis_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
5140 {
5141 struct pciide_channel *cp;
5142 int channel;
5143 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
5144 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
5145 int rev = sc->sc_rev;
5146 bus_size_t cmdsize, ctlsize;
5147 struct pciide_sis *sis;
5148
5149 /* Allocate memory for private data */
5150 sc->sc_cookielen = sizeof(*sis);
5151 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO);
5152 sis = sc->sc_cookie;
5153
5154 pci_find_device(NULL, sis_hostbr_match);
5155
5156 if (sis_hostbr_type_match) {
5157 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
5158 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
5159 pciide_pci_read(sc->sc_pc, sc->sc_tag,
5160 SIS_REG_57) & 0x7f);
5161 if (sc->sc_pp->ide_product == PCI_PRODUCT_SIS_5518) {
5162 sis->sis_type = SIS_TYPE_133NEW;
5163 sc->sc_wdcdev.UDMA_cap =
5164 sis_hostbr_type_match->udma_mode;
5165 } else {
5166 if (pci_find_device(NULL, sis_south_match)) {
5167 sis->sis_type = SIS_TYPE_133OLD;
5168 sc->sc_wdcdev.UDMA_cap =
5169 sis_hostbr_type_match->udma_mode;
5170 } else {
5171 sis->sis_type = SIS_TYPE_100NEW;
5172 sc->sc_wdcdev.UDMA_cap =
5173 sis_hostbr_type_match->udma_mode;
5174 }
5175 }
5176 } else {
5177 sis->sis_type = sis_hostbr_type_match->type;
5178 sc->sc_wdcdev.UDMA_cap =
5179 sis_hostbr_type_match->udma_mode;
5180 }
5181 printf(": %s", sis_hostbr_type_match->name);
5182 } else {
5183 printf(": 5597/5598");
5184 if (rev >= 0xd0) {
5185 sc->sc_wdcdev.UDMA_cap = 2;
5186 sis->sis_type = SIS_TYPE_66;
5187 } else {
5188 sc->sc_wdcdev.UDMA_cap = 0;
5189 sis->sis_type = SIS_TYPE_NOUDMA;
5190 }
5191 }
5192
5193 printf(": DMA");
5194 pciide_mapreg_dma(sc, pa);
5195
5196 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5197 WDC_CAPABILITY_MODE;
5198 if (sc->sc_dma_ok) {
5199 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
5200 sc->sc_wdcdev.irqack = pciide_irqack;
5201 if (sis->sis_type >= SIS_TYPE_66)
5202 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
5203 }
5204
5205 sc->sc_wdcdev.PIO_cap = 4;
5206 sc->sc_wdcdev.DMA_cap = 2;
5207
5208 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5209 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5210 switch (sis->sis_type) {
5211 case SIS_TYPE_NOUDMA:
5212 case SIS_TYPE_66:
5213 case SIS_TYPE_100OLD:
5214 sc->sc_wdcdev.set_modes = sis_setup_channel;
5215 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
5216 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
5217 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
5218 break;
5219 case SIS_TYPE_100NEW:
5220 case SIS_TYPE_133OLD:
5221 sc->sc_wdcdev.set_modes = sis_setup_channel;
5222 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
5223 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
5224 break;
5225 case SIS_TYPE_133NEW:
5226 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
5227
pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 5228 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 5229 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 5230 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 5231 break; 5232 } 5233 5234 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5235 5236 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5237 cp = &sc->pciide_channels[channel]; 5238 if (pciide_chansetup(sc, channel, interface) == 0) 5239 continue; 5240 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 5241 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 5242 printf("%s: %s ignored (disabled)\n", 5243 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5244 cp->hw_ok = 0; 5245 continue; 5246 } 5247 pciide_map_compat_intr(pa, cp, channel, interface); 5248 if (cp->hw_ok == 0) 5249 continue; 5250 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5251 pciide_pci_intr); 5252 if (cp->hw_ok == 0) { 5253 pciide_unmap_compat_intr(pa, cp, channel, interface); 5254 continue; 5255 } 5256 if (pciide_chan_candisable(cp)) { 5257 if (channel == 0) 5258 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 5259 else 5260 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 5261 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 5262 sis_ctr0); 5263 } 5264 if (cp->hw_ok == 0) { 5265 pciide_unmap_compat_intr(pa, cp, channel, interface); 5266 continue; 5267 } 5268 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5269 } 5270 } 5271 5272 void 5273 sis96x_setup_channel(struct channel_softc *chp) 5274 { 5275 struct ata_drive_datas *drvp; 5276 int drive; 5277 u_int32_t sis_tim; 5278 u_int32_t idedma_ctl; 5279 int regtim; 5280 struct pciide_channel *cp = (struct pciide_channel *)chp; 5281 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5282 5283 sis_tim = 0; 5284 idedma_ctl = 0; 5285 /* setup DMA if needed */ 5286 pciide_channel_dma_setup(cp); 5287 5288 for (drive = 0; drive < 2; drive++) { 5289 regtim = SIS_TIM133( 5290 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 5291 chp->channel, drive); 5292 drvp = &chp->ch_drive[drive]; 5293 /* If no drive, skip */ 5294 if ((drvp->drive_flags & DRIVE) == 0) 5295 continue; 5296 /* add timing values, setup DMA if needed */ 5297 if (drvp->drive_flags & DRIVE_UDMA) { 5298 /* use Ultra/DMA */ 5299 drvp->drive_flags &= ~DRIVE_DMA; 5300 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5301 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 5302 if (drvp->UDMA_mode > 2) 5303 drvp->UDMA_mode = 2; 5304 } 5305 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 5306 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5307 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5308 } else if (drvp->drive_flags & DRIVE_DMA) { 5309 /* 5310 * use Multiword DMA 5311 * Timings will be used for both PIO and DMA, 5312 * so adjust DMA mode if needed 5313 */ 5314 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5315 drvp->PIO_mode = drvp->DMA_mode + 2; 5316 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5317 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5318 drvp->PIO_mode - 2 : 0; 5319 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 5320 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5321 } else { 5322 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5323 } 5324 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 5325 "channel %d drive %d: 0x%x (reg 0x%x)\n", 5326 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 5327 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 5328 } 5329 if (idedma_ctl != 0) { 5330 /* Add software bits in status register */ 5331 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5332 IDEDMA_CTL(chp->channel), idedma_ctl); 5333 } 5334 pciide_print_modes(cp); 5335 } 5336 5337 void 5338 sis_setup_channel(struct channel_softc *chp) 5339 { 5340 struct ata_drive_datas *drvp; 5341 int drive; 5342 u_int32_t sis_tim; 5343 u_int32_t idedma_ctl; 5344 struct pciide_channel *cp = (struct pciide_channel *)chp; 5345 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5346 struct pciide_sis *sis = sc->sc_cookie; 5347 5348 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 5349 "channel %d 0x%x\n", chp->channel, 5350 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 5351 DEBUG_PROBE); 5352 sis_tim = 0; 5353 idedma_ctl = 0; 5354 /* setup DMA if needed */ 5355 pciide_channel_dma_setup(cp); 5356 5357 for (drive = 0; drive < 2; drive++) { 5358 drvp = &chp->ch_drive[drive]; 5359 /* If no drive, skip */ 5360 if ((drvp->drive_flags & DRIVE) == 0) 5361 continue; 5362 /* add timing values, setup DMA if needed */ 5363 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5364 (drvp->drive_flags & DRIVE_UDMA) == 0) 5365 goto pio; 5366 5367 if (drvp->drive_flags & DRIVE_UDMA) { 5368 /* use Ultra/DMA */ 5369 drvp->drive_flags &= ~DRIVE_DMA; 5370 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5371 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 5372 if (drvp->UDMA_mode > 2) 5373 drvp->UDMA_mode = 2; 5374 } 5375 switch (sis->sis_type) { 5376 case SIS_TYPE_66: 5377 case SIS_TYPE_100OLD: 5378 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 5379 SIS_TIM66_UDMA_TIME_OFF(drive); 5380 break; 5381 case SIS_TYPE_100NEW: 5382 sis_tim |= 5383 sis_udma100new_tim[drvp->UDMA_mode] << 5384 SIS_TIM100_UDMA_TIME_OFF(drive); 5385 break; 5386 case SIS_TYPE_133OLD: 5387 sis_tim |= 5388 sis_udma133old_tim[drvp->UDMA_mode] << 5389 SIS_TIM100_UDMA_TIME_OFF(drive); 5390 break; 5391 default: 5392 printf("unknown SiS IDE type %d\n", 5393 sis->sis_type); 5394 } 5395 } else { 5396 /* 5397 * use Multiword DMA 5398 * Timings will be used for both PIO and DMA, 5399 * so adjust DMA mode if needed 5400 */ 5401 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5402 drvp->PIO_mode = drvp->DMA_mode + 2; 5403 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5404 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5405 drvp->PIO_mode - 2 : 0; 5406 if (drvp->DMA_mode == 0) 5407 drvp->PIO_mode = 0; 5408 } 5409 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5410 pio: switch (sis->sis_type) { 5411 case SIS_TYPE_NOUDMA: 5412 case SIS_TYPE_66: 5413 case SIS_TYPE_100OLD: 5414 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5415 SIS_TIM66_ACT_OFF(drive); 5416 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5417 SIS_TIM66_REC_OFF(drive); 5418 break; 5419 case SIS_TYPE_100NEW: 5420 case SIS_TYPE_133OLD: 5421 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5422 SIS_TIM100_ACT_OFF(drive); 5423 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5424 SIS_TIM100_REC_OFF(drive); 5425 break; 5426 default: 5427 printf("unknown SiS IDE type %d\n", 5428 sis->sis_type); 5429 } 5430 } 5431 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 5432 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 5433 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 5434 if (idedma_ctl != 0) { 5435 /* Add software bits in status register */ 5436 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5437 IDEDMA_CTL(chp->channel), idedma_ctl); 5438 } 5439 pciide_print_modes(cp); 5440 } 5441 5442 void 5443 natsemi_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5444 { 5445 struct pciide_channel *cp; 5446 int channel; 5447 pcireg_t interface, ctl; 5448 bus_size_t cmdsize, ctlsize; 5449 5450 printf(": DMA"); 5451 pciide_mapreg_dma(sc, pa); 5452 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 5453 5454 if (sc->sc_dma_ok) { 5455 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5456 sc->sc_wdcdev.irqack = natsemi_irqack; 5457 } 5458 5459 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 5460 5461 /* 5462 * Mask off interrupts from both channels, appropriate channel(s) 5463 * will be unmasked later. 5464 */ 5465 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5466 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 5467 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 5468 5469 sc->sc_wdcdev.PIO_cap = 4; 5470 sc->sc_wdcdev.DMA_cap = 2; 5471 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 5472 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5473 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5474 5475 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5476 PCI_CLASS_REG)); 5477 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 5478 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5479 5480 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
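* Channels left in compatibility mode interrupt through the legacy
* ISA IRQs instead, so INTA stays masked for them.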
*/ 5481 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 5482 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 5483 ctl &= ~NATSEMI_CTRL1_INTAMASK; 5484 else 5485 ctl |= NATSEMI_CTRL1_INTAMASK; 5486 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 5487 5488 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5489 cp = &sc->pciide_channels[channel]; 5490 if (pciide_chansetup(sc, channel, interface) == 0) 5491 continue; 5492 5493 pciide_map_compat_intr(pa, cp, channel, interface); 5494 if (cp->hw_ok == 0) 5495 continue; 5496 5497 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5498 natsemi_pci_intr); 5499 if (cp->hw_ok == 0) { 5500 pciide_unmap_compat_intr(pa, cp, channel, interface); 5501 continue; 5502 } 5503 natsemi_setup_channel(&cp->wdc_channel); 5504 } 5505 } 5506 5507 void 5508 natsemi_setup_channel(struct channel_softc *chp) 5509 { 5510 struct ata_drive_datas *drvp; 5511 int drive, ndrives = 0; 5512 u_int32_t idedma_ctl = 0; 5513 struct pciide_channel *cp = (struct pciide_channel *)chp; 5514 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5515 u_int8_t tim; 5516 5517 /* setup DMA if needed */ 5518 pciide_channel_dma_setup(cp); 5519 5520 for (drive = 0; drive < 2; drive++) { 5521 drvp = &chp->ch_drive[drive]; 5522 /* If no drive, skip */ 5523 if ((drvp->drive_flags & DRIVE) == 0) 5524 continue; 5525 5526 ndrives++; 5527 /* add timing values, setup DMA if needed */ 5528 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 5529 tim = natsemi_pio_pulse[drvp->PIO_mode] | 5530 (natsemi_pio_recover[drvp->PIO_mode] << 4); 5531 } else { 5532 /* 5533 * use Multiword DMA 5534 * Timings will be used for both PIO and DMA, 5535 * so adjust DMA mode if needed 5536 */ 5537 if (drvp->PIO_mode >= 3 && 5538 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5539 drvp->DMA_mode = drvp->PIO_mode - 2; 5540 } 5541 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5542 tim = natsemi_dma_pulse[drvp->DMA_mode] | 5543 (natsemi_dma_recover[drvp->DMA_mode] << 4); 5544 } 5545 5546 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5547 NATSEMI_RTREG(chp->channel, drive), tim); 5548 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5549 NATSEMI_WTREG(chp->channel, drive), tim); 5550 } 5551 if (idedma_ctl != 0) { 5552 /* Add software bits in status register */ 5553 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5554 IDEDMA_CTL(chp->channel), idedma_ctl); 5555 } 5556 if (ndrives > 0) { 5557 /* Unmask the channel if at least one drive is found */ 5558 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5559 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 5560 ~(NATSEMI_CHMASK(chp->channel))); 5561 } 5562 5563 pciide_print_modes(cp); 5564 5565 /* Go ahead and ack interrupts generated during probe. 
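* The interrupt/error bits in IDEDMA_CTL are write-1-to-clear, so
* writing back the value just read clears them.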
*/ 5566 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5567 IDEDMA_CTL(chp->channel), 5568 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5569 IDEDMA_CTL(chp->channel))); 5570 } 5571 5572 void 5573 natsemi_irqack(struct channel_softc *chp) 5574 { 5575 struct pciide_channel *cp = (struct pciide_channel *)chp; 5576 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5577 u_int8_t clr; 5578 5579 /* The "clear" bits are in the wrong register *sigh* */ 5580 clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5581 IDEDMA_CMD(chp->channel)); 5582 clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5583 IDEDMA_CTL(chp->channel)) & 5584 (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR); 5585 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5586 IDEDMA_CMD(chp->channel), clr); 5587 } 5588 5589 int 5590 natsemi_pci_intr(void *arg) 5591 { 5592 struct pciide_softc *sc = arg; 5593 struct pciide_channel *cp; 5594 struct channel_softc *wdc_cp; 5595 int i, rv, crv; 5596 u_int8_t msk; 5597 5598 rv = 0; 5599 msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2); 5600 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5601 cp = &sc->pciide_channels[i]; 5602 wdc_cp = &cp->wdc_channel; 5603 5604 /* If a compat channel skip. */ 5605 if (cp->compat) 5606 continue; 5607 5608 /* If this channel is masked, skip it. */ 5609 if (msk & NATSEMI_CHMASK(i)) 5610 continue; 5611 5612 if (pciide_intr_flag(cp) == 0) 5613 continue; 5614 5615 crv = wdcintr(wdc_cp); 5616 if (crv == 0) 5617 ; /* leave rv alone */ 5618 else if (crv == 1) 5619 rv = 1; /* claim the intr */ 5620 else if (rv == 0) /* crv should be -1 in this case */ 5621 rv = crv; /* if we've done no better, take it */ 5622 } 5623 return (rv); 5624 } 5625 5626 void 5627 ns_scx200_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5628 { 5629 struct pciide_channel *cp; 5630 int channel; 5631 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 5632 bus_size_t cmdsize, ctlsize; 5633 5634 printf(": DMA"); 5635 pciide_mapreg_dma(sc, pa); 5636 5637 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5638 WDC_CAPABILITY_MODE; 5639 if (sc->sc_dma_ok) { 5640 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5641 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5642 sc->sc_wdcdev.irqack = pciide_irqack; 5643 } 5644 sc->sc_wdcdev.PIO_cap = 4; 5645 sc->sc_wdcdev.DMA_cap = 2; 5646 sc->sc_wdcdev.UDMA_cap = 2; 5647 5648 sc->sc_wdcdev.set_modes = ns_scx200_setup_channel; 5649 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5650 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5651 5652 /* 5653 * Soekris net4801 errata 0003: 5654 * 5655 * The SC1100 built in busmaster IDE controller is pretty standard, 5656 * but have two bugs: data transfers need to be dword aligned and 5657 * it cannot do an exact 64Kbyte data transfer. 5658 * 5659 * Assume that reducing maximum segment size by one page 5660 * will be enough, and restrict boundary too for extra certainty. 5661 */ 5662 if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SCx200_IDE) { 5663 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE; 5664 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE; 5665 } 5666 5667 /* 5668 * This chip seems to be unable to do one-sector transfers 5669 * using DMA. 
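 * The WDC_QUIRK_NOSHORTDMA flag set below should make the wdc layer
 * avoid DMA for such single-sector transfers.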
5670 */ 5671 sc->sc_wdcdev.quirks = WDC_QUIRK_NOSHORTDMA; 5672 5673 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5674 5675 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5676 cp = &sc->pciide_channels[channel]; 5677 if (pciide_chansetup(sc, channel, interface) == 0) 5678 continue; 5679 pciide_map_compat_intr(pa, cp, channel, interface); 5680 if (cp->hw_ok == 0) 5681 continue; 5682 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5683 pciide_pci_intr); 5684 if (cp->hw_ok == 0) { 5685 pciide_unmap_compat_intr(pa, cp, channel, interface); 5686 continue; 5687 } 5688 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5689 } 5690 } 5691 5692 void 5693 ns_scx200_setup_channel(struct channel_softc *chp) 5694 { 5695 struct ata_drive_datas *drvp; 5696 int drive, mode; 5697 u_int32_t idedma_ctl; 5698 struct pciide_channel *cp = (struct pciide_channel*)chp; 5699 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5700 int channel = chp->channel; 5701 int pioformat; 5702 pcireg_t piotim, dmatim; 5703 5704 /* Setup DMA if needed */ 5705 pciide_channel_dma_setup(cp); 5706 5707 idedma_ctl = 0; 5708 5709 pioformat = (pci_conf_read(sc->sc_pc, sc->sc_tag, 5710 SCx200_TIM_DMA(0, 0)) >> SCx200_PIOFORMAT_SHIFT) & 0x01; 5711 WDCDEBUG_PRINT(("%s: pio format %d\n", __func__, pioformat), 5712 DEBUG_PROBE); 5713 5714 /* Per channel settings */ 5715 for (drive = 0; drive < 2; drive++) { 5716 drvp = &chp->ch_drive[drive]; 5717 5718 /* If no drive, skip */ 5719 if ((drvp->drive_flags & DRIVE) == 0) 5720 continue; 5721 5722 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5723 SCx200_TIM_PIO(channel, drive)); 5724 dmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5725 SCx200_TIM_DMA(channel, drive)); 5726 WDCDEBUG_PRINT(("%s:%d:%d: piotim=0x%x, dmatim=0x%x\n", 5727 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5728 piotim, dmatim), DEBUG_PROBE); 5729 5730 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 5731 (drvp->drive_flags & DRIVE_UDMA) != 0) { 5732 /* Setup UltraDMA mode */ 5733 drvp->drive_flags &= ~DRIVE_DMA; 5734 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5735 dmatim = scx200_udma33[drvp->UDMA_mode]; 5736 mode = drvp->PIO_mode; 5737 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 5738 (drvp->drive_flags & DRIVE_DMA) != 0) { 5739 /* Setup multiword DMA mode */ 5740 drvp->drive_flags &= ~DRIVE_UDMA; 5741 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5742 dmatim = scx200_dma33[drvp->DMA_mode]; 5743 5744 /* mode = min(pio, dma + 2) */ 5745 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 5746 mode = drvp->PIO_mode; 5747 else 5748 mode = drvp->DMA_mode + 2; 5749 } else { 5750 mode = drvp->PIO_mode; 5751 } 5752 5753 /* Setup PIO mode */ 5754 drvp->PIO_mode = mode; 5755 if (mode < 2) 5756 drvp->DMA_mode = 0; 5757 else 5758 drvp->DMA_mode = mode - 2; 5759 5760 piotim = scx200_pio33[pioformat][drvp->PIO_mode]; 5761 5762 WDCDEBUG_PRINT(("%s:%d:%d: new piotim=0x%x, dmatim=0x%x\n", 5763 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5764 piotim, dmatim), DEBUG_PROBE); 5765 5766 pci_conf_write(sc->sc_pc, sc->sc_tag, 5767 SCx200_TIM_PIO(channel, drive), piotim); 5768 pci_conf_write(sc->sc_pc, sc->sc_tag, 5769 SCx200_TIM_DMA(channel, drive), dmatim); 5770 } 5771 5772 if (idedma_ctl != 0) { 5773 /* Add software bits in status register */ 5774 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5775 IDEDMA_CTL(channel), idedma_ctl); 5776 } 5777 5778 pciide_print_modes(cp); 5779 } 5780 5781 void 5782 acer_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5783 { 5784 struct 
pciide_channel *cp; 5785 int channel; 5786 pcireg_t cr, interface; 5787 bus_size_t cmdsize, ctlsize; 5788 int rev = sc->sc_rev; 5789 5790 printf(": DMA"); 5791 pciide_mapreg_dma(sc, pa); 5792 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5793 WDC_CAPABILITY_MODE; 5794 5795 if (sc->sc_dma_ok) { 5796 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 5797 if (rev >= 0x20) { 5798 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5799 if (rev >= 0xC4) 5800 sc->sc_wdcdev.UDMA_cap = 5; 5801 else if (rev >= 0xC2) 5802 sc->sc_wdcdev.UDMA_cap = 4; 5803 else 5804 sc->sc_wdcdev.UDMA_cap = 2; 5805 } 5806 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5807 sc->sc_wdcdev.irqack = pciide_irqack; 5808 if (rev <= 0xC4) 5809 sc->sc_wdcdev.dma_init = acer_dma_init; 5810 } 5811 5812 sc->sc_wdcdev.PIO_cap = 4; 5813 sc->sc_wdcdev.DMA_cap = 2; 5814 sc->sc_wdcdev.set_modes = acer_setup_channel; 5815 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5816 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5817 5818 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 5819 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 5820 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 5821 5822 /* Enable "microsoft register bits" R/W. */ 5823 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 5824 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 5825 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 5826 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 5827 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 5828 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 5829 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 5830 ~ACER_CHANSTATUSREGS_RO); 5831 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 5832 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 5833 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 5834 /* Don't use cr, re-read the real register content instead */ 5835 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5836 PCI_CLASS_REG)); 5837 5838 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5839 5840 /* From linux: enable "Cable Detection" */ 5841 if (rev >= 0xC2) 5842 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 5843 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 5844 | ACER_0x4B_CDETECT); 5845 5846 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5847 cp = &sc->pciide_channels[channel]; 5848 if (pciide_chansetup(sc, channel, interface) == 0) 5849 continue; 5850 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 5851 printf("%s: %s ignored (disabled)\n", 5852 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5853 cp->hw_ok = 0; 5854 continue; 5855 } 5856 pciide_map_compat_intr(pa, cp, channel, interface); 5857 if (cp->hw_ok == 0) 5858 continue; 5859 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5860 (rev >= 0xC2) ? 
pciide_pci_intr : acer_pci_intr); 5861 if (cp->hw_ok == 0) { 5862 pciide_unmap_compat_intr(pa, cp, channel, interface); 5863 continue; 5864 } 5865 if (pciide_chan_candisable(cp)) { 5866 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 5867 pci_conf_write(sc->sc_pc, sc->sc_tag, 5868 PCI_CLASS_REG, cr); 5869 } 5870 if (cp->hw_ok == 0) { 5871 pciide_unmap_compat_intr(pa, cp, channel, interface); 5872 continue; 5873 } 5874 acer_setup_channel(&cp->wdc_channel); 5875 } 5876 } 5877 5878 void 5879 acer_setup_channel(struct channel_softc *chp) 5880 { 5881 struct ata_drive_datas *drvp; 5882 int drive; 5883 u_int32_t acer_fifo_udma; 5884 u_int32_t idedma_ctl; 5885 struct pciide_channel *cp = (struct pciide_channel *)chp; 5886 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5887 5888 idedma_ctl = 0; 5889 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 5890 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 5891 acer_fifo_udma), DEBUG_PROBE); 5892 /* setup DMA if needed */ 5893 pciide_channel_dma_setup(cp); 5894 5895 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 5896 DRIVE_UDMA) { /* check 80 pins cable */ 5897 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 5898 ACER_0x4A_80PIN(chp->channel)) { 5899 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 5900 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 5901 DEBUG_PROBE); 5902 if (chp->ch_drive[0].UDMA_mode > 2) 5903 chp->ch_drive[0].UDMA_mode = 2; 5904 if (chp->ch_drive[1].UDMA_mode > 2) 5905 chp->ch_drive[1].UDMA_mode = 2; 5906 } 5907 } 5908 5909 for (drive = 0; drive < 2; drive++) { 5910 drvp = &chp->ch_drive[drive]; 5911 /* If no drive, skip */ 5912 if ((drvp->drive_flags & DRIVE) == 0) 5913 continue; 5914 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 5915 "channel %d drive %d 0x%x\n", chp->channel, drive, 5916 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5917 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 5918 /* clear FIFO/DMA mode */ 5919 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 5920 ACER_UDMA_EN(chp->channel, drive) | 5921 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 5922 5923 /* add timing values, setup DMA if needed */ 5924 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5925 (drvp->drive_flags & DRIVE_UDMA) == 0) { 5926 acer_fifo_udma |= 5927 ACER_FTH_OPL(chp->channel, drive, 0x1); 5928 goto pio; 5929 } 5930 5931 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 5932 if (drvp->drive_flags & DRIVE_UDMA) { 5933 /* use Ultra/DMA */ 5934 drvp->drive_flags &= ~DRIVE_DMA; 5935 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 5936 acer_fifo_udma |= 5937 ACER_UDMA_TIM(chp->channel, drive, 5938 acer_udma[drvp->UDMA_mode]); 5939 /* XXX disable if one drive < UDMA3 ? */ 5940 if (drvp->UDMA_mode >= 3) { 5941 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5942 ACER_0x4B, 5943 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5944 ACER_0x4B) | ACER_0x4B_UDMA66); 5945 } 5946 } else { 5947 /* 5948 * use Multiword DMA 5949 * Timings will be used for both PIO and DMA, 5950 * so adjust DMA mode if needed 5951 */ 5952 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5953 drvp->PIO_mode = drvp->DMA_mode + 2; 5954 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5955 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5956 drvp->PIO_mode - 2 : 0; 5957 if (drvp->DMA_mode == 0) 5958 drvp->PIO_mode = 0; 5959 } 5960 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5961 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 5962 ACER_IDETIM(chp->channel, drive), 5963 acer_pio[drvp->PIO_mode]); 5964 } 5965 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 5966 acer_fifo_udma), DEBUG_PROBE); 5967 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 5968 if (idedma_ctl != 0) { 5969 /* Add software bits in status register */ 5970 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5971 IDEDMA_CTL(chp->channel), idedma_ctl); 5972 } 5973 pciide_print_modes(cp); 5974 } 5975 5976 int 5977 acer_pci_intr(void *arg) 5978 { 5979 struct pciide_softc *sc = arg; 5980 struct pciide_channel *cp; 5981 struct channel_softc *wdc_cp; 5982 int i, rv, crv; 5983 u_int32_t chids; 5984 5985 rv = 0; 5986 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 5987 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5988 cp = &sc->pciide_channels[i]; 5989 wdc_cp = &cp->wdc_channel; 5990 /* If a compat channel skip. */ 5991 if (cp->compat) 5992 continue; 5993 if (chids & ACER_CHIDS_INT(i)) { 5994 crv = wdcintr(wdc_cp); 5995 if (crv == 0) 5996 printf("%s:%d: bogus intr\n", 5997 sc->sc_wdcdev.sc_dev.dv_xname, i); 5998 else 5999 rv = 1; 6000 } 6001 } 6002 return (rv); 6003 } 6004 6005 int 6006 acer_dma_init(void *v, int channel, int drive, void *databuf, 6007 size_t datalen, int flags) 6008 { 6009 /* Use PIO for LBA48 transfers. */ 6010 if (flags & WDC_DMA_LBA48) 6011 return (EINVAL); 6012 6013 return (pciide_dma_init(v, channel, drive, databuf, datalen, flags)); 6014 } 6015 6016 void 6017 hpt_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6018 { 6019 struct pciide_channel *cp; 6020 int i, compatchan, revision; 6021 pcireg_t interface; 6022 bus_size_t cmdsize, ctlsize; 6023 6024 revision = sc->sc_rev; 6025 6026 /* 6027 * when the chip is in native mode it identifies itself as a 6028 * 'misc mass storage'. Fake interface in this case. 6029 */ 6030 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 6031 interface = PCI_INTERFACE(pa->pa_class); 6032 } else { 6033 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 6034 PCIIDE_INTERFACE_PCI(0); 6035 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 6036 (revision == HPT370_REV || revision == HPT370A_REV || 6037 revision == HPT372_REV)) || 6038 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 6039 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 6040 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 6041 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 6042 interface |= PCIIDE_INTERFACE_PCI(1); 6043 } 6044 6045 printf(": DMA"); 6046 pciide_mapreg_dma(sc, pa); 6047 printf("\n"); 6048 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6049 WDC_CAPABILITY_MODE; 6050 if (sc->sc_dma_ok) { 6051 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6052 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6053 sc->sc_wdcdev.irqack = pciide_irqack; 6054 } 6055 sc->sc_wdcdev.PIO_cap = 4; 6056 sc->sc_wdcdev.DMA_cap = 2; 6057 6058 sc->sc_wdcdev.set_modes = hpt_setup_channel; 6059 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6060 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 6061 revision == HPT366_REV) { 6062 sc->sc_wdcdev.UDMA_cap = 4; 6063 /* 6064 * The 366 has 2 PCI IDE functions, one for primary and one 6065 * for secondary. 
So we need to call pciide_mapregs_compat() 6066 * with the real channel 6067 */ 6068 if (pa->pa_function == 0) { 6069 compatchan = 0; 6070 } else if (pa->pa_function == 1) { 6071 compatchan = 1; 6072 } else { 6073 printf("%s: unexpected PCI function %d\n", 6074 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 6075 return; 6076 } 6077 sc->sc_wdcdev.nchannels = 1; 6078 } else { 6079 sc->sc_wdcdev.nchannels = 2; 6080 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 6081 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 6082 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 6083 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 6084 sc->sc_wdcdev.UDMA_cap = 6; 6085 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 6086 if (revision == HPT372_REV) 6087 sc->sc_wdcdev.UDMA_cap = 6; 6088 else 6089 sc->sc_wdcdev.UDMA_cap = 5; 6090 } 6091 } 6092 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6093 cp = &sc->pciide_channels[i]; 6094 compatchan = 0; 6095 if (sc->sc_wdcdev.nchannels > 1) { 6096 compatchan = i; 6097 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 6098 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 6099 printf("%s: %s ignored (disabled)\n", 6100 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6101 cp->hw_ok = 0; 6102 continue; 6103 } 6104 } 6105 if (pciide_chansetup(sc, i, interface) == 0) 6106 continue; 6107 if (interface & PCIIDE_INTERFACE_PCI(i)) { 6108 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 6109 &ctlsize, hpt_pci_intr); 6110 } else { 6111 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 6112 &cmdsize, &ctlsize); 6113 } 6114 if (cp->hw_ok == 0) 6115 return; 6116 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 6117 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 6118 wdcattach(&cp->wdc_channel); 6119 hpt_setup_channel(&cp->wdc_channel); 6120 } 6121 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 6122 (revision == HPT370_REV || revision == HPT370A_REV || 6123 revision == HPT372_REV)) || 6124 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 6125 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 6126 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 6127 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 6128 /* 6129 * Turn off fast interrupts 6130 */ 6131 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0), 6132 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) & 6133 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 6134 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1), 6135 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) & 6136 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 6137 6138 /* 6139 * HPT370 and highter has a bit to disable interrupts, 6140 * make sure to clear it 6141 */ 6142 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 6143 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 6144 ~HPT_CSEL_IRQDIS); 6145 } 6146 /* set clocks, etc (mandatory on 372/4, optional otherwise) */ 6147 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 6148 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 6149 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 6150 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 || 6151 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 6152 revision == HPT372_REV)) 6153 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 6154 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 6155 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 6156 6157 return; 6158 } 6159 6160 void 6161 hpt_setup_channel(struct channel_softc *chp) 6162 { 6163 struct 
ata_drive_datas *drvp; 6164 int drive; 6165 int cable; 6166 u_int32_t before, after; 6167 u_int32_t idedma_ctl; 6168 struct pciide_channel *cp = (struct pciide_channel *)chp; 6169 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6170 int revision = sc->sc_rev; 6171 u_int32_t *tim_pio, *tim_dma, *tim_udma; 6172 6173 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 6174 6175 /* setup DMA if needed */ 6176 pciide_channel_dma_setup(cp); 6177 6178 idedma_ctl = 0; 6179 6180 switch (sc->sc_pp->ide_product) { 6181 case PCI_PRODUCT_TRIONES_HPT366: 6182 if (revision == HPT370_REV || 6183 revision == HPT370A_REV) { 6184 tim_pio = hpt370_pio; 6185 tim_dma = hpt370_dma; 6186 tim_udma = hpt370_udma; 6187 } else if (revision == HPT372_REV) { 6188 tim_pio = hpt372_pio; 6189 tim_dma = hpt372_dma; 6190 tim_udma = hpt372_udma; 6191 } else { 6192 tim_pio = hpt366_pio; 6193 tim_dma = hpt366_dma; 6194 tim_udma = hpt366_udma; 6195 } 6196 break; 6197 case PCI_PRODUCT_TRIONES_HPT372A: 6198 case PCI_PRODUCT_TRIONES_HPT302: 6199 case PCI_PRODUCT_TRIONES_HPT371: 6200 tim_pio = hpt372_pio; 6201 tim_dma = hpt372_dma; 6202 tim_udma = hpt372_udma; 6203 break; 6204 case PCI_PRODUCT_TRIONES_HPT374: 6205 tim_pio = hpt374_pio; 6206 tim_dma = hpt374_dma; 6207 tim_udma = hpt374_udma; 6208 break; 6209 default: 6210 printf("%s: no known timing values\n", 6211 sc->sc_wdcdev.sc_dev.dv_xname); 6212 goto end; 6213 } 6214 6215 /* Per drive settings */ 6216 for (drive = 0; drive < 2; drive++) { 6217 drvp = &chp->ch_drive[drive]; 6218 /* If no drive, skip */ 6219 if ((drvp->drive_flags & DRIVE) == 0) 6220 continue; 6221 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 6222 HPT_IDETIM(chp->channel, drive)); 6223 6224 /* add timing values, setup DMA if needed */ 6225 if (drvp->drive_flags & DRIVE_UDMA) { 6226 /* use Ultra/DMA */ 6227 drvp->drive_flags &= ~DRIVE_DMA; 6228 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 6229 drvp->UDMA_mode > 2) { 6230 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6231 "cable not detected\n", drvp->drive_name, 6232 sc->sc_wdcdev.sc_dev.dv_xname, 6233 chp->channel, drive), DEBUG_PROBE); 6234 drvp->UDMA_mode = 2; 6235 } 6236 after = tim_udma[drvp->UDMA_mode]; 6237 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6238 } else if (drvp->drive_flags & DRIVE_DMA) { 6239 /* 6240 * use Multiword DMA. 
6241 * Timings will be used for both PIO and DMA, so adjust 6242 * DMA mode if needed 6243 */ 6244 if (drvp->PIO_mode >= 3 && 6245 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 6246 drvp->DMA_mode = drvp->PIO_mode - 2; 6247 } 6248 after = tim_dma[drvp->DMA_mode]; 6249 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6250 } else { 6251 /* PIO only */ 6252 after = tim_pio[drvp->PIO_mode]; 6253 } 6254 pci_conf_write(sc->sc_pc, sc->sc_tag, 6255 HPT_IDETIM(chp->channel, drive), after); 6256 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 6257 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 6258 after, before), DEBUG_PROBE); 6259 } 6260 end: 6261 if (idedma_ctl != 0) { 6262 /* Add software bits in status register */ 6263 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6264 IDEDMA_CTL(chp->channel), idedma_ctl); 6265 } 6266 pciide_print_modes(cp); 6267 } 6268 6269 int 6270 hpt_pci_intr(void *arg) 6271 { 6272 struct pciide_softc *sc = arg; 6273 struct pciide_channel *cp; 6274 struct channel_softc *wdc_cp; 6275 int rv = 0; 6276 int dmastat, i, crv; 6277 6278 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6279 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6280 IDEDMA_CTL(i)); 6281 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 6282 IDEDMA_CTL_INTR) 6283 continue; 6284 cp = &sc->pciide_channels[i]; 6285 wdc_cp = &cp->wdc_channel; 6286 crv = wdcintr(wdc_cp); 6287 if (crv == 0) { 6288 printf("%s:%d: bogus intr\n", 6289 sc->sc_wdcdev.sc_dev.dv_xname, i); 6290 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6291 IDEDMA_CTL(i), dmastat); 6292 } else 6293 rv = 1; 6294 } 6295 return (rv); 6296 } 6297 6298 /* Macros to test product */ 6299 #define PDC_IS_262(sc) \ 6300 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 6301 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6302 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 6303 #define PDC_IS_265(sc) \ 6304 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6305 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 6306 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6307 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6308 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6309 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6310 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6311 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6312 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6313 #define PDC_IS_268(sc) \ 6314 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6315 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6316 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6317 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6318 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6319 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6320 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6321 #define PDC_IS_269(sc) \ 6322 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6323 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6324 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6325 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6326 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6327 6328 u_int8_t 6329 pdc268_config_read(struct channel_softc *chp, int index) 6330 { 6331 struct pciide_channel *cp = (struct pciide_channel *)chp; 6332 struct 
pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6333 int channel = chp->channel; 6334 6335 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6336 PDC268_INDEX(channel), index); 6337 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6338 PDC268_DATA(channel))); 6339 } 6340 6341 void 6342 pdc202xx_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6343 { 6344 struct pciide_channel *cp; 6345 int channel; 6346 pcireg_t interface, st, mode; 6347 bus_size_t cmdsize, ctlsize; 6348 6349 if (!PDC_IS_268(sc)) { 6350 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6351 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 6352 st), DEBUG_PROBE); 6353 } 6354 6355 /* turn off RAID mode */ 6356 if (!PDC_IS_268(sc)) 6357 st &= ~PDC2xx_STATE_IDERAID; 6358 6359 /* 6360 * can't rely on the PCI_CLASS_REG content if the chip was in raid 6361 * mode. We have to fake interface 6362 */ 6363 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 6364 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 6365 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 6366 6367 printf(": DMA"); 6368 pciide_mapreg_dma(sc, pa); 6369 6370 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6371 WDC_CAPABILITY_MODE; 6372 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 6373 PDC_IS_262(sc)) 6374 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 6375 if (sc->sc_dma_ok) { 6376 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6377 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6378 sc->sc_wdcdev.irqack = pciide_irqack; 6379 } 6380 sc->sc_wdcdev.PIO_cap = 4; 6381 sc->sc_wdcdev.DMA_cap = 2; 6382 if (PDC_IS_269(sc)) 6383 sc->sc_wdcdev.UDMA_cap = 6; 6384 else if (PDC_IS_265(sc)) 6385 sc->sc_wdcdev.UDMA_cap = 5; 6386 else if (PDC_IS_262(sc)) 6387 sc->sc_wdcdev.UDMA_cap = 4; 6388 else 6389 sc->sc_wdcdev.UDMA_cap = 2; 6390 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
6391 pdc20268_setup_channel : pdc202xx_setup_channel; 6392 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6393 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 6394 6395 if (PDC_IS_262(sc)) { 6396 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 6397 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 6398 } 6399 6400 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 6401 if (!PDC_IS_268(sc)) { 6402 /* setup failsafe defaults */ 6403 mode = 0; 6404 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 6405 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 6406 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 6407 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 6408 for (channel = 0; 6409 channel < sc->sc_wdcdev.nchannels; 6410 channel++) { 6411 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6412 "drive 0 initial timings 0x%x, now 0x%x\n", 6413 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6414 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 6415 DEBUG_PROBE); 6416 pci_conf_write(sc->sc_pc, sc->sc_tag, 6417 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 6418 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6419 "drive 1 initial timings 0x%x, now 0x%x\n", 6420 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6421 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 6422 pci_conf_write(sc->sc_pc, sc->sc_tag, 6423 PDC2xx_TIM(channel, 1), mode); 6424 } 6425 6426 mode = PDC2xx_SCR_DMA; 6427 if (PDC_IS_262(sc)) { 6428 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 6429 } else { 6430 /* the BIOS set it up this way */ 6431 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 6432 } 6433 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 6434 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 6435 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 6436 "now 0x%x\n", 6437 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6438 PDC2xx_SCR), 6439 mode), DEBUG_PROBE); 6440 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6441 PDC2xx_SCR, mode); 6442 6443 /* controller initial state register is OK even without BIOS */ 6444 /* Set DMA mode to IDE DMA compatibility */ 6445 mode = 6446 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 6447 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 6448 DEBUG_PROBE); 6449 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 6450 mode | 0x1); 6451 mode = 6452 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 6453 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 6454 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 6455 mode | 0x1); 6456 } 6457 6458 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6459 cp = &sc->pciide_channels[channel]; 6460 if (pciide_chansetup(sc, channel, interface) == 0) 6461 continue; 6462 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 6463 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 6464 printf("%s: %s ignored (disabled)\n", 6465 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6466 cp->hw_ok = 0; 6467 continue; 6468 } 6469 pciide_map_compat_intr(pa, cp, channel, interface); 6470 if (cp->hw_ok == 0) 6471 continue; 6472 if (PDC_IS_265(sc)) 6473 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6474 pdc20265_pci_intr); 6475 else 6476 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6477 pdc202xx_pci_intr); 6478 if (cp->hw_ok == 0) { 6479 pciide_unmap_compat_intr(pa, cp, channel, interface); 6480 continue; 6481 } 6482 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 6483 st &= ~(PDC_IS_262(sc) ? 
6484 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 6485 pciide_unmap_compat_intr(pa, cp, channel, interface); 6486 } 6487 if (PDC_IS_268(sc)) 6488 pdc20268_setup_channel(&cp->wdc_channel); 6489 else 6490 pdc202xx_setup_channel(&cp->wdc_channel); 6491 } 6492 if (!PDC_IS_268(sc)) { 6493 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 6494 "0x%x\n", st), DEBUG_PROBE); 6495 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 6496 } 6497 return; 6498 } 6499 6500 void 6501 pdc202xx_setup_channel(struct channel_softc *chp) 6502 { 6503 struct ata_drive_datas *drvp; 6504 int drive; 6505 pcireg_t mode, st; 6506 u_int32_t idedma_ctl, scr, atapi; 6507 struct pciide_channel *cp = (struct pciide_channel *)chp; 6508 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6509 int channel = chp->channel; 6510 6511 /* setup DMA if needed */ 6512 pciide_channel_dma_setup(cp); 6513 6514 idedma_ctl = 0; 6515 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 6516 sc->sc_wdcdev.sc_dev.dv_xname, 6517 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 6518 DEBUG_PROBE); 6519 6520 /* Per channel settings */ 6521 if (PDC_IS_262(sc)) { 6522 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6523 PDC262_U66); 6524 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6525 /* Check cable */ 6526 if ((st & PDC262_STATE_80P(channel)) != 0 && 6527 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6528 chp->ch_drive[0].UDMA_mode > 2) || 6529 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6530 chp->ch_drive[1].UDMA_mode > 2))) { 6531 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 6532 sc->sc_wdcdev.sc_dev.dv_xname, channel), 6533 DEBUG_PROBE); 6534 if (chp->ch_drive[0].UDMA_mode > 2) 6535 chp->ch_drive[0].UDMA_mode = 2; 6536 if (chp->ch_drive[1].UDMA_mode > 2) 6537 chp->ch_drive[1].UDMA_mode = 2; 6538 } 6539 /* Trim UDMA mode */ 6540 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6541 chp->ch_drive[0].UDMA_mode <= 2) || 6542 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6543 chp->ch_drive[1].UDMA_mode <= 2)) { 6544 if (chp->ch_drive[0].UDMA_mode > 2) 6545 chp->ch_drive[0].UDMA_mode = 2; 6546 if (chp->ch_drive[1].UDMA_mode > 2) 6547 chp->ch_drive[1].UDMA_mode = 2; 6548 } 6549 /* Set U66 if needed */ 6550 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6551 chp->ch_drive[0].UDMA_mode > 2) || 6552 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6553 chp->ch_drive[1].UDMA_mode > 2)) 6554 scr |= PDC262_U66_EN(channel); 6555 else 6556 scr &= ~PDC262_U66_EN(channel); 6557 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6558 PDC262_U66, scr); 6559 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 6560 sc->sc_wdcdev.sc_dev.dv_xname, channel, 6561 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6562 PDC262_ATAPI(channel))), DEBUG_PROBE); 6563 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 6564 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 6565 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6566 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6567 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 6568 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6569 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6570 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 6571 atapi = 0; 6572 else 6573 atapi = PDC262_ATAPI_UDMA; 6574 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6575 PDC262_ATAPI(channel), atapi); 6576 } 6577 } 6578 for (drive = 0; drive < 2; drive++) { 6579 drvp = &chp->ch_drive[drive]; 6580 /* If no drive, skip */ 6581 if ((drvp->drive_flags & 
DRIVE) == 0) 6582 continue; 6583 mode = 0; 6584 if (drvp->drive_flags & DRIVE_UDMA) { 6585 /* use Ultra/DMA */ 6586 drvp->drive_flags &= ~DRIVE_DMA; 6587 mode = PDC2xx_TIM_SET_MB(mode, 6588 pdc2xx_udma_mb[drvp->UDMA_mode]); 6589 mode = PDC2xx_TIM_SET_MC(mode, 6590 pdc2xx_udma_mc[drvp->UDMA_mode]); 6591 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6592 } else if (drvp->drive_flags & DRIVE_DMA) { 6593 mode = PDC2xx_TIM_SET_MB(mode, 6594 pdc2xx_dma_mb[drvp->DMA_mode]); 6595 mode = PDC2xx_TIM_SET_MC(mode, 6596 pdc2xx_dma_mc[drvp->DMA_mode]); 6597 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6598 } else { 6599 mode = PDC2xx_TIM_SET_MB(mode, 6600 pdc2xx_dma_mb[0]); 6601 mode = PDC2xx_TIM_SET_MC(mode, 6602 pdc2xx_dma_mc[0]); 6603 } 6604 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 6605 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 6606 if (drvp->drive_flags & DRIVE_ATA) 6607 mode |= PDC2xx_TIM_PRE; 6608 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 6609 if (drvp->PIO_mode >= 3) { 6610 mode |= PDC2xx_TIM_IORDY; 6611 if (drive == 0) 6612 mode |= PDC2xx_TIM_IORDYp; 6613 } 6614 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 6615 "timings 0x%x\n", 6616 sc->sc_wdcdev.sc_dev.dv_xname, 6617 chp->channel, drive, mode), DEBUG_PROBE); 6618 pci_conf_write(sc->sc_pc, sc->sc_tag, 6619 PDC2xx_TIM(chp->channel, drive), mode); 6620 } 6621 if (idedma_ctl != 0) { 6622 /* Add software bits in status register */ 6623 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6624 IDEDMA_CTL(channel), idedma_ctl); 6625 } 6626 pciide_print_modes(cp); 6627 } 6628 6629 void 6630 pdc20268_setup_channel(struct channel_softc *chp) 6631 { 6632 struct ata_drive_datas *drvp; 6633 int drive, cable; 6634 u_int32_t idedma_ctl; 6635 struct pciide_channel *cp = (struct pciide_channel *)chp; 6636 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6637 int channel = chp->channel; 6638 6639 /* check 80 pins cable */ 6640 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE; 6641 6642 /* setup DMA if needed */ 6643 pciide_channel_dma_setup(cp); 6644 6645 idedma_ctl = 0; 6646 6647 for (drive = 0; drive < 2; drive++) { 6648 drvp = &chp->ch_drive[drive]; 6649 /* If no drive, skip */ 6650 if ((drvp->drive_flags & DRIVE) == 0) 6651 continue; 6652 if (drvp->drive_flags & DRIVE_UDMA) { 6653 /* use Ultra/DMA */ 6654 drvp->drive_flags &= ~DRIVE_DMA; 6655 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6656 if (cable && drvp->UDMA_mode > 2) { 6657 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6658 "cable not detected\n", drvp->drive_name, 6659 sc->sc_wdcdev.sc_dev.dv_xname, 6660 channel, drive), DEBUG_PROBE); 6661 drvp->UDMA_mode = 2; 6662 } 6663 } else if (drvp->drive_flags & DRIVE_DMA) { 6664 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6665 } 6666 } 6667 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */ 6668 if (idedma_ctl != 0) { 6669 /* Add software bits in status register */ 6670 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6671 IDEDMA_CTL(channel), idedma_ctl); 6672 } 6673 pciide_print_modes(cp); 6674 } 6675 6676 int 6677 pdc202xx_pci_intr(void *arg) 6678 { 6679 struct pciide_softc *sc = arg; 6680 struct pciide_channel *cp; 6681 struct channel_softc *wdc_cp; 6682 int i, rv, crv; 6683 u_int32_t scr; 6684 6685 rv = 0; 6686 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 6687 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6688 cp = &sc->pciide_channels[i]; 6689 wdc_cp = &cp->wdc_channel; 6690 /* If a compat channel skip. 
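 * Compat channels are serviced by the handler established through
 * pciide_map_compat_intr(), not by this native-PCI interrupt routine.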
*/ 6691 if (cp->compat) 6692 continue; 6693 if (scr & PDC2xx_SCR_INT(i)) { 6694 crv = wdcintr(wdc_cp); 6695 if (crv == 0) 6696 printf("%s:%d: bogus intr (reg 0x%x)\n", 6697 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 6698 else 6699 rv = 1; 6700 } 6701 } 6702 return (rv); 6703 } 6704 6705 int 6706 pdc20265_pci_intr(void *arg) 6707 { 6708 struct pciide_softc *sc = arg; 6709 struct pciide_channel *cp; 6710 struct channel_softc *wdc_cp; 6711 int i, rv, crv; 6712 u_int32_t dmastat; 6713 6714 rv = 0; 6715 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6716 cp = &sc->pciide_channels[i]; 6717 wdc_cp = &cp->wdc_channel; 6718 /* If a compat channel skip. */ 6719 if (cp->compat) 6720 continue; 6721 6722 /* 6723 * In case of shared IRQ check that the interrupt 6724 * was actually generated by this channel. 6725 * Only check the channel that is enabled. 6726 */ 6727 if (cp->hw_ok && PDC_IS_268(sc)) { 6728 if ((pdc268_config_read(wdc_cp, 6729 0x0b) & PDC268_INTR) == 0) 6730 continue; 6731 } 6732 6733 /* 6734 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 6735 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 6736 * So use it instead (requires 2 reg reads instead of 1, 6737 * but we can't do it another way). 6738 */ 6739 dmastat = bus_space_read_1(sc->sc_dma_iot, 6740 sc->sc_dma_ioh, IDEDMA_CTL(i)); 6741 if ((dmastat & IDEDMA_CTL_INTR) == 0) 6742 continue; 6743 6744 crv = wdcintr(wdc_cp); 6745 if (crv == 0) 6746 printf("%s:%d: bogus intr\n", 6747 sc->sc_wdcdev.sc_dev.dv_xname, i); 6748 else 6749 rv = 1; 6750 } 6751 return (rv); 6752 } 6753 6754 void 6755 pdc20262_dma_start(void *v, int channel, int drive) 6756 { 6757 struct pciide_softc *sc = v; 6758 struct pciide_dma_maps *dma_maps = 6759 &sc->pciide_channels[channel].dma_maps[drive]; 6760 u_int8_t clock; 6761 u_int32_t count; 6762 6763 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6764 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6765 PDC262_U66); 6766 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6767 PDC262_U66, clock | PDC262_U66_EN(channel)); 6768 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 6769 count |= dma_maps->dma_flags & WDC_DMA_READ ? 
6770 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 6771 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6772 PDC262_ATAPI(channel), count); 6773 } 6774 6775 pciide_dma_start(v, channel, drive); 6776 } 6777 6778 int 6779 pdc20262_dma_finish(void *v, int channel, int drive, int force) 6780 { 6781 struct pciide_softc *sc = v; 6782 struct pciide_dma_maps *dma_maps = 6783 &sc->pciide_channels[channel].dma_maps[drive]; 6784 u_int8_t clock; 6785 6786 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6787 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6788 PDC262_U66); 6789 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6790 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 6791 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6792 PDC262_ATAPI(channel), 0); 6793 } 6794 6795 return (pciide_dma_finish(v, channel, drive, force)); 6796 } 6797 6798 void 6799 pdcsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6800 { 6801 struct pciide_channel *cp; 6802 struct channel_softc *wdc_cp; 6803 struct pciide_pdcsata *ps; 6804 int channel, i; 6805 bus_size_t dmasize; 6806 pci_intr_handle_t intrhandle; 6807 const char *intrstr; 6808 6809 /* Allocate memory for private data */ 6810 sc->sc_cookielen = sizeof(*ps); 6811 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO); 6812 ps = sc->sc_cookie; 6813 6814 /* 6815 * Promise SATA controllers have 3 or 4 channels, 6816 * the usual IDE registers are mapped in I/O space, with offsets. 6817 */ 6818 if (pci_intr_map(pa, &intrhandle) != 0) { 6819 printf(": couldn't map interrupt\n"); 6820 return; 6821 } 6822 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 6823 6824 switch (sc->sc_pp->ide_product) { 6825 case PCI_PRODUCT_PROMISE_PDC20318: 6826 case PCI_PRODUCT_PROMISE_PDC20319: 6827 case PCI_PRODUCT_PROMISE_PDC20371: 6828 case PCI_PRODUCT_PROMISE_PDC20375: 6829 case PCI_PRODUCT_PROMISE_PDC20376: 6830 case PCI_PRODUCT_PROMISE_PDC20377: 6831 case PCI_PRODUCT_PROMISE_PDC20378: 6832 case PCI_PRODUCT_PROMISE_PDC20379: 6833 default: 6834 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6835 intrhandle, IPL_BIO, pdc203xx_pci_intr, sc, 6836 sc->sc_wdcdev.sc_dev.dv_xname); 6837 break; 6838 6839 case PCI_PRODUCT_PROMISE_PDC40518: 6840 case PCI_PRODUCT_PROMISE_PDC40519: 6841 case PCI_PRODUCT_PROMISE_PDC40718: 6842 case PCI_PRODUCT_PROMISE_PDC40719: 6843 case PCI_PRODUCT_PROMISE_PDC40779: 6844 case PCI_PRODUCT_PROMISE_PDC20571: 6845 case PCI_PRODUCT_PROMISE_PDC20575: 6846 case PCI_PRODUCT_PROMISE_PDC20579: 6847 case PCI_PRODUCT_PROMISE_PDC20771: 6848 case PCI_PRODUCT_PROMISE_PDC20775: 6849 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6850 intrhandle, IPL_BIO, pdc205xx_pci_intr, sc, 6851 sc->sc_wdcdev.sc_dev.dv_xname); 6852 break; 6853 } 6854 6855 if (sc->sc_pci_ih == NULL) { 6856 printf(": couldn't establish native-PCI interrupt"); 6857 if (intrstr != NULL) 6858 printf(" at %s", intrstr); 6859 printf("\n"); 6860 return; 6861 } 6862 6863 sc->sc_dma_ok = (pci_mapreg_map(pa, PCIIDE_REG_BUS_MASTER_DMA, 6864 PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_dma_iot, 6865 &sc->sc_dma_ioh, NULL, &dmasize, 0) == 0); 6866 if (!sc->sc_dma_ok) { 6867 printf(": couldn't map bus-master DMA registers\n"); 6868 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6869 return; 6870 } 6871 6872 sc->sc_dmat = pa->pa_dmat; 6873 6874 if (pci_mapreg_map(pa, PDC203xx_BAR_IDEREGS, 6875 PCI_MAPREG_MEM_TYPE_32BIT, 0, &ps->ba5_st, 6876 &ps->ba5_sh, NULL, NULL, 0) != 0) { 6877 printf(": couldn't map IDE registers\n"); 6878 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, dmasize); 
6879 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6880 return; 6881 } 6882 6883 printf(": DMA\n"); 6884 6885 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 6886 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6887 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6888 sc->sc_wdcdev.irqack = pdc203xx_irqack; 6889 sc->sc_wdcdev.PIO_cap = 4; 6890 sc->sc_wdcdev.DMA_cap = 2; 6891 sc->sc_wdcdev.UDMA_cap = 6; 6892 sc->sc_wdcdev.set_modes = pdc203xx_setup_channel; 6893 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6894 6895 switch (sc->sc_pp->ide_product) { 6896 case PCI_PRODUCT_PROMISE_PDC20318: 6897 case PCI_PRODUCT_PROMISE_PDC20319: 6898 case PCI_PRODUCT_PROMISE_PDC20371: 6899 case PCI_PRODUCT_PROMISE_PDC20375: 6900 case PCI_PRODUCT_PROMISE_PDC20376: 6901 case PCI_PRODUCT_PROMISE_PDC20377: 6902 case PCI_PRODUCT_PROMISE_PDC20378: 6903 case PCI_PRODUCT_PROMISE_PDC20379: 6904 default: 6905 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x06c, 0x00ff0033); 6906 sc->sc_wdcdev.nchannels = 6907 (bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x48) & 0x02) ? 6908 PDC203xx_NCHANNELS : 3; 6909 break; 6910 6911 case PCI_PRODUCT_PROMISE_PDC40518: 6912 case PCI_PRODUCT_PROMISE_PDC40519: 6913 case PCI_PRODUCT_PROMISE_PDC40718: 6914 case PCI_PRODUCT_PROMISE_PDC40719: 6915 case PCI_PRODUCT_PROMISE_PDC40779: 6916 case PCI_PRODUCT_PROMISE_PDC20571: 6917 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6918 sc->sc_wdcdev.nchannels = PDC40718_NCHANNELS; 6919 6920 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6921 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6922 6923 break; 6924 case PCI_PRODUCT_PROMISE_PDC20575: 6925 case PCI_PRODUCT_PROMISE_PDC20579: 6926 case PCI_PRODUCT_PROMISE_PDC20771: 6927 case PCI_PRODUCT_PROMISE_PDC20775: 6928 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6929 sc->sc_wdcdev.nchannels = PDC20575_NCHANNELS; 6930 6931 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6932 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6933 6934 break; 6935 } 6936 6937 sc->sc_wdcdev.dma_arg = sc; 6938 sc->sc_wdcdev.dma_init = pciide_dma_init; 6939 sc->sc_wdcdev.dma_start = pdc203xx_dma_start; 6940 sc->sc_wdcdev.dma_finish = pdc203xx_dma_finish; 6941 6942 for (channel = 0; channel < sc->sc_wdcdev.nchannels; 6943 channel++) { 6944 cp = &sc->pciide_channels[channel]; 6945 sc->wdc_chanarray[channel] = &cp->wdc_channel; 6946 6947 cp->ih = sc->sc_pci_ih; 6948 cp->name = NULL; 6949 cp->wdc_channel.channel = channel; 6950 cp->wdc_channel.wdc = &sc->sc_wdcdev; 6951 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 6952 if (cp->wdc_channel.ch_queue == NULL) { 6953 printf("%s: channel %d: " 6954 "cannot allocate channel queue\n", 6955 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6956 continue; 6957 } 6958 wdc_cp = &cp->wdc_channel; 6959 6960 ps->regs[channel].ctl_iot = ps->ba5_st; 6961 ps->regs[channel].cmd_iot = ps->ba5_st; 6962 6963 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6964 0x0238 + (channel << 7), 1, 6965 &ps->regs[channel].ctl_ioh) != 0) { 6966 printf("%s: couldn't map channel %d ctl regs\n", 6967 sc->sc_wdcdev.sc_dev.dv_xname, 6968 channel); 6969 continue; 6970 } 6971 for (i = 0; i < WDC_NREG; i++) { 6972 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6973 0x0200 + (i << 2) + (channel << 7), i == 0 ? 
4 : 1, 6974 &ps->regs[channel].cmd_iohs[i]) != 0) { 6975 printf("%s: couldn't map channel %d cmd " 6976 "regs\n", 6977 sc->sc_wdcdev.sc_dev.dv_xname, 6978 channel); 6979 goto loop_end; 6980 } 6981 } 6982 ps->regs[channel].cmd_iohs[wdr_status & _WDC_REGMASK] = 6983 ps->regs[channel].cmd_iohs[wdr_command & _WDC_REGMASK]; 6984 ps->regs[channel].cmd_iohs[wdr_features & _WDC_REGMASK] = 6985 ps->regs[channel].cmd_iohs[wdr_error & _WDC_REGMASK]; 6986 wdc_cp->data32iot = wdc_cp->cmd_iot = 6987 ps->regs[channel].cmd_iot; 6988 wdc_cp->data32ioh = wdc_cp->cmd_ioh = 6989 ps->regs[channel].cmd_iohs[0]; 6990 wdc_cp->_vtbl = &wdc_pdc203xx_vtbl; 6991 6992 /* 6993 * Subregion the busmaster registers. They're spread all over 6994 * the controller's register space :(. They are also 4 bytes 6995 * wide, with some specific extensions in the extra bits. 6996 * It also seems that the IDEDMA_CTL register isn't available. 6997 */ 6998 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6999 0x260 + (channel << 7), 1, 7000 &ps->regs[channel].dma_iohs[IDEDMA_CMD(0)]) != 0) { 7001 printf("%s channel %d: can't subregion DMA " 7002 "registers\n", 7003 sc->sc_wdcdev.sc_dev.dv_xname, channel); 7004 continue; 7005 } 7006 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 7007 0x244 + (channel << 7), 4, 7008 &ps->regs[channel].dma_iohs[IDEDMA_TBL(0)]) != 0) { 7009 printf("%s channel %d: can't subregion DMA " 7010 "registers\n", 7011 sc->sc_wdcdev.sc_dev.dv_xname, channel); 7012 continue; 7013 } 7014 7015 wdcattach(wdc_cp); 7016 bus_space_write_4(sc->sc_dma_iot, 7017 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7018 (bus_space_read_4(sc->sc_dma_iot, 7019 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7020 0) & ~0x00003f9f) | (channel + 1)); 7021 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 7022 (channel + 1) << 2, 0x00000001); 7023 7024 pdc203xx_setup_channel(&cp->wdc_channel); 7025 7026 loop_end: ; 7027 } 7028 7029 printf("%s: using %s for native-PCI interrupt\n", 7030 sc->sc_wdcdev.sc_dev.dv_xname, 7031 intrstr ?
intrstr : "unknown interrupt"); 7032 } 7033 7034 void 7035 pdc203xx_setup_channel(struct channel_softc *chp) 7036 { 7037 struct ata_drive_datas *drvp; 7038 struct pciide_channel *cp = (struct pciide_channel *)chp; 7039 int drive, s; 7040 7041 pciide_channel_dma_setup(cp); 7042 7043 for (drive = 0; drive < 2; drive++) { 7044 drvp = &chp->ch_drive[drive]; 7045 if ((drvp->drive_flags & DRIVE) == 0) 7046 continue; 7047 if (drvp->drive_flags & DRIVE_UDMA) { 7048 s = splbio(); 7049 drvp->drive_flags &= ~DRIVE_DMA; 7050 splx(s); 7051 } 7052 } 7053 pciide_print_modes(cp); 7054 } 7055 7056 int 7057 pdc203xx_pci_intr(void *arg) 7058 { 7059 struct pciide_softc *sc = arg; 7060 struct pciide_channel *cp; 7061 struct channel_softc *wdc_cp; 7062 struct pciide_pdcsata *ps = sc->sc_cookie; 7063 int i, rv, crv; 7064 u_int32_t scr; 7065 7066 rv = 0; 7067 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x00040); 7068 7069 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7070 cp = &sc->pciide_channels[i]; 7071 wdc_cp = &cp->wdc_channel; 7072 if (scr & (1 << (i + 1))) { 7073 crv = wdcintr(wdc_cp); 7074 if (crv == 0) { 7075 printf("%s:%d: bogus intr (reg 0x%x)\n", 7076 sc->sc_wdcdev.sc_dev.dv_xname, 7077 i, scr); 7078 } else 7079 rv = 1; 7080 } 7081 } 7082 7083 return (rv); 7084 } 7085 7086 int 7087 pdc205xx_pci_intr(void *arg) 7088 { 7089 struct pciide_softc *sc = arg; 7090 struct pciide_channel *cp; 7091 struct channel_softc *wdc_cp; 7092 struct pciide_pdcsata *ps = sc->sc_cookie; 7093 int i, rv, crv; 7094 u_int32_t scr, status; 7095 7096 rv = 0; 7097 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x40); 7098 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x40, scr & 0x0000ffff); 7099 7100 status = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x60); 7101 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, status & 0x000000ff); 7102 7103 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7104 cp = &sc->pciide_channels[i]; 7105 wdc_cp = &cp->wdc_channel; 7106 if (scr & (1 << (i + 1))) { 7107 crv = wdcintr(wdc_cp); 7108 if (crv == 0) { 7109 printf("%s:%d: bogus intr (reg 0x%x)\n", 7110 sc->sc_wdcdev.sc_dev.dv_xname, 7111 i, scr); 7112 } else 7113 rv = 1; 7114 } 7115 } 7116 return rv; 7117 } 7118 7119 void 7120 pdc203xx_irqack(struct channel_softc *chp) 7121 { 7122 struct pciide_channel *cp = (struct pciide_channel *)chp; 7123 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7124 struct pciide_pdcsata *ps = sc->sc_cookie; 7125 int chan = chp->channel; 7126 7127 bus_space_write_4(sc->sc_dma_iot, 7128 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, 7129 (bus_space_read_4(sc->sc_dma_iot, 7130 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 7131 0) & ~0x00003f9f) | (chan + 1)); 7132 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 7133 (chan + 1) << 2, 0x00000001); 7134 } 7135 7136 void 7137 pdc203xx_dma_start(void *v, int channel, int drive) 7138 { 7139 struct pciide_softc *sc = v; 7140 struct pciide_channel *cp = &sc->pciide_channels[channel]; 7141 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 7142 struct pciide_pdcsata *ps = sc->sc_cookie; 7143 7144 /* Write table address */ 7145 bus_space_write_4(sc->sc_dma_iot, 7146 ps->regs[channel].dma_iohs[IDEDMA_TBL(0)], 0, 7147 dma_maps->dmamap_table->dm_segs[0].ds_addr); 7148 7149 /* Start DMA engine */ 7150 bus_space_write_4(sc->sc_dma_iot, 7151 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7152 (bus_space_read_4(sc->sc_dma_iot, 7153 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7154 0) & ~0xc0) | ((dma_maps->dma_flags & WDC_DMA_READ) ? 
0x80 : 0xc0)); 7155 } 7156 7157 int 7158 pdc203xx_dma_finish(void *v, int channel, int drive, int force) 7159 { 7160 struct pciide_softc *sc = v; 7161 struct pciide_channel *cp = &sc->pciide_channels[channel]; 7162 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 7163 struct pciide_pdcsata *ps = sc->sc_cookie; 7164 7165 /* Stop DMA channel */ 7166 bus_space_write_4(sc->sc_dma_iot, 7167 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7168 (bus_space_read_4(sc->sc_dma_iot, 7169 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7170 0) & ~0x80)); 7171 7172 /* Unload the map of the data buffer */ 7173 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 7174 dma_maps->dmamap_xfer->dm_mapsize, 7175 (dma_maps->dma_flags & WDC_DMA_READ) ? 7176 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 7177 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 7178 7179 return (0); 7180 } 7181 7182 u_int8_t 7183 pdc203xx_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7184 { 7185 struct pciide_channel *cp = (struct pciide_channel *)chp; 7186 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7187 struct pciide_pdcsata *ps = sc->sc_cookie; 7188 u_int8_t val; 7189 7190 if (reg & _WDC_AUX) { 7191 return (bus_space_read_1(ps->regs[chp->channel].ctl_iot, 7192 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 7193 } else { 7194 val = bus_space_read_1(ps->regs[chp->channel].cmd_iot, 7195 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0); 7196 return (val); 7197 } 7198 } 7199 7200 void 7201 pdc203xx_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7202 { 7203 struct pciide_channel *cp = (struct pciide_channel *)chp; 7204 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7205 struct pciide_pdcsata *ps = sc->sc_cookie; 7206 7207 if (reg & _WDC_AUX) 7208 bus_space_write_1(ps->regs[chp->channel].ctl_iot, 7209 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 7210 else 7211 bus_space_write_1(ps->regs[chp->channel].cmd_iot, 7212 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 7213 0, val); 7214 } 7215 7216 void 7217 pdc205xx_do_reset(struct channel_softc *chp) 7218 { 7219 struct pciide_channel *cp = (struct pciide_channel *)chp; 7220 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7221 struct pciide_pdcsata *ps = sc->sc_cookie; 7222 u_int32_t scontrol; 7223 7224 wdc_do_reset(chp); 7225 7226 /* reset SATA */ 7227 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 7228 SCONTROL_WRITE(ps, chp->channel, scontrol); 7229 delay(50*1000); 7230 7231 scontrol &= ~SControl_DET_INIT; 7232 SCONTROL_WRITE(ps, chp->channel, scontrol); 7233 delay(50*1000); 7234 } 7235 7236 void 7237 pdc205xx_drv_probe(struct channel_softc *chp) 7238 { 7239 struct pciide_channel *cp = (struct pciide_channel *)chp; 7240 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7241 struct pciide_pdcsata *ps = sc->sc_cookie; 7242 bus_space_handle_t *iohs; 7243 u_int32_t scontrol, sstatus; 7244 u_int16_t scnt, sn, cl, ch; 7245 int s; 7246 7247 SCONTROL_WRITE(ps, chp->channel, 0); 7248 delay(50*1000); 7249 7250 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 7251 SCONTROL_WRITE(ps,chp->channel,scontrol); 7252 delay(50*1000); 7253 7254 scontrol &= ~SControl_DET_INIT; 7255 SCONTROL_WRITE(ps,chp->channel,scontrol); 7256 delay(50*1000); 7257 7258 sstatus = SSTATUS_READ(ps,chp->channel); 7259 7260 switch (sstatus & SStatus_DET_mask) { 7261 case SStatus_DET_NODEV: 7262 /* No Device; be silent. 
*/ 7263 break; 7264 7265 case SStatus_DET_DEV_NE: 7266 printf("%s: port %d: device connected, but " 7267 "communication not established\n", 7268 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7269 break; 7270 7271 case SStatus_DET_OFFLINE: 7272 printf("%s: port %d: PHY offline\n", 7273 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7274 break; 7275 7276 case SStatus_DET_DEV: 7277 iohs = ps->regs[chp->channel].cmd_iohs; 7278 bus_space_write_1(chp->cmd_iot, iohs[wdr_sdh], 0, 7279 WDSD_IBM); 7280 delay(10); /* 400ns delay */ 7281 scnt = bus_space_read_2(chp->cmd_iot, iohs[wdr_seccnt], 0); 7282 sn = bus_space_read_2(chp->cmd_iot, iohs[wdr_sector], 0); 7283 cl = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_lo], 0); 7284 ch = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_hi], 0); 7285 #if 0 7286 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7287 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7288 scnt, sn, cl, ch); 7289 #endif 7290 /* 7291 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7292 * cases we get wrong values here, so ignore it. 7293 */ 7294 s = splbio(); 7295 if (cl == 0x14 && ch == 0xeb) 7296 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7297 else 7298 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7299 splx(s); 7300 #if 0 7301 printf("%s: port %d", 7302 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7303 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7304 case 1: 7305 printf(": 1.5Gb/s"); 7306 break; 7307 case 2: 7308 printf(": 3.0Gb/s"); 7309 break; 7310 } 7311 printf("\n"); 7312 #endif 7313 break; 7314 7315 default: 7316 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7317 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7318 } 7319 } 7320 7321 #ifdef notyet 7322 /* 7323 * Inline functions for accessing the timing registers of the 7324 * OPTi controller. 7325 * 7326 * These *MUST* disable interrupts as they need atomic access to 7327 * certain magic registers. Failure to adhere to this *will* 7328 * break things in subtle ways if the wdc registers are accessed 7329 * by an interrupt routine while this magic sequence is executing. 
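 * The config registers are reached through the regular command-block
 * registers after the unlock sequence below, hence the splhigh().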
7330 */ 7331 static __inline__ u_int8_t 7332 opti_read_config(struct channel_softc *chp, int reg) 7333 { 7334 u_int8_t rv; 7335 int s = splhigh(); 7336 7337 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7338 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7339 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7340 7341 /* Followed by an 8-bit write of 0x3 to register #2 */ 7342 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7343 7344 /* Now we can read the required register */ 7345 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 7346 7347 /* Restore the real registers */ 7348 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7349 7350 splx(s); 7351 7352 return (rv); 7353 } 7354 7355 static __inline__ void 7356 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 7357 { 7358 int s = splhigh(); 7359 7360 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7361 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7362 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7363 7364 /* Followed by an 8-bit write of 0x3 to register #2 */ 7365 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7366 7367 /* Now we can write the required register */ 7368 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 7369 7370 /* Restore the real registers */ 7371 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7372 7373 splx(s); 7374 } 7375 7376 void 7377 opti_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7378 { 7379 struct pciide_channel *cp; 7380 bus_size_t cmdsize, ctlsize; 7381 pcireg_t interface; 7382 u_int8_t init_ctrl; 7383 int channel; 7384 7385 printf(": DMA"); 7386 /* 7387 * XXXSCW: 7388 * There seem to be a couple of buggy revisions/implementations 7389 * of the OPTi pciide chipset. This kludge seems to fix one of 7390 * the reported problems (NetBSD PR/11644) but still fails for the 7391 * other (NetBSD PR/13151), although the latter may be due to other 7392 * issues too... 
	 */
	if (sc->sc_rev <= 0x12) {
		printf(" (disabled)");
		sc->sc_dma_ok = 0;
		sc->sc_wdcdev.cap = 0;
	} else {
		sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
		pciide_mapreg_dma(sc, pa);
	}

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = opti_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
	    OPTI_REG_INIT_CONTROL);

	interface = PCI_INTERFACE(pa->pa_class);

	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (channel == 1 &&
		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
			printf("%s: %s ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			cp->hw_ok = 0;
			continue;
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0) {
			pciide_unmap_compat_intr(pa, cp, channel, interface);
			continue;
		}
		opti_setup_channel(&cp->wdc_channel);
	}
}

void
opti_setup_channel(struct channel_softc *chp)
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive, spd;
	int mode[2];
	u_int8_t rv, mr;

	/*
	 * The `Delay' and `Address Setup Time' fields of the
	 * Miscellaneous Register are always zero initially.
	 */
	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
	mr &= ~(OPTI_MISC_DELAY_MASK |
	    OPTI_MISC_ADDR_SETUP_MASK |
	    OPTI_MISC_INDEX_MASK);

	/* Prime the control register before setting timing values */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);

	/* Determine the clockrate of the PCIbus the chip is attached to */
	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
	spd &= OPTI_STRAP_PCI_SPEED_MASK;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0) {
			mode[drive] = -1;
			continue;
		}

		if ((drvp->drive_flags & DRIVE_DMA)) {
			/*
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;

			mode[drive] = drvp->DMA_mode + 5;
		} else
			mode[drive] = drvp->PIO_mode;

		if (drive && mode[0] >= 0 &&
		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
			/*
			 * Can't have two drives using different values
			 * for `Address Setup Time'.
			 * Slow down the faster drive to compensate.
			 */
			int d = (opti_tim_as[spd][mode[0]] >
			    opti_tim_as[spd][mode[1]]) ? 0 : 1;

			mode[d] = mode[1-d];
			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
			chp->ch_drive[d].DMA_mode = 0;
			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		int m;
		if ((m = mode[drive]) < 0)
			continue;

		/* Set the Address Setup Time and select appropriate index */
		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
		rv |= OPTI_MISC_INDEX(drive);
		opti_write_config(chp, OPTI_REG_MISC, mr | rv);

		/* Set the pulse width and recovery timing parameters */
		rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);

		/* Set the Enhanced Mode register appropriately */
		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
	}

	/* Finally, enable the timings */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);

	pciide_print_modes(cp);
}
#endif

void
serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcitag_t pcib_tag;
	int channel;
	bus_size_t cmdsize, ctlsize;

	printf(": DMA");
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_RCC_OSB4_IDE:
		sc->sc_wdcdev.UDMA_cap = 2;
		break;
	case PCI_PRODUCT_RCC_CSB5_IDE:
		if (sc->sc_rev < 0x92)
			sc->sc_wdcdev.UDMA_cap = 4;
		else
			sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_RCC_CSB6_IDE:
		sc->sc_wdcdev.UDMA_cap = 4;
		break;
	case PCI_PRODUCT_RCC_CSB6_RAID_IDE:
	case PCI_PRODUCT_RCC_HT_1000_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	}

	sc->sc_wdcdev.set_modes = serverworks_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels =
	    (sc->sc_pp->ide_product == PCI_PRODUCT_RCC_CSB6_IDE ?
1 : 2); 7592 7593 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7594 cp = &sc->pciide_channels[channel]; 7595 if (pciide_chansetup(sc, channel, interface) == 0) 7596 continue; 7597 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7598 serverworks_pci_intr); 7599 if (cp->hw_ok == 0) 7600 return; 7601 pciide_map_compat_intr(pa, cp, channel, interface); 7602 if (cp->hw_ok == 0) 7603 return; 7604 serverworks_setup_channel(&cp->wdc_channel); 7605 } 7606 7607 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 7608 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 7609 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 7610 } 7611 7612 void 7613 serverworks_setup_channel(struct channel_softc *chp) 7614 { 7615 struct ata_drive_datas *drvp; 7616 struct pciide_channel *cp = (struct pciide_channel *)chp; 7617 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7618 int channel = chp->channel; 7619 int drive, unit; 7620 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 7621 u_int32_t idedma_ctl; 7622 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 7623 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 7624 7625 /* setup DMA if needed */ 7626 pciide_channel_dma_setup(cp); 7627 7628 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 7629 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 7630 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 7631 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 7632 7633 pio_time &= ~(0xffff << (16 * channel)); 7634 dma_time &= ~(0xffff << (16 * channel)); 7635 pio_mode &= ~(0xff << (8 * channel + 16)); 7636 udma_mode &= ~(0xff << (8 * channel + 16)); 7637 udma_mode &= ~(3 << (2 * channel)); 7638 7639 idedma_ctl = 0; 7640 7641 /* Per drive settings */ 7642 for (drive = 0; drive < 2; drive++) { 7643 drvp = &chp->ch_drive[drive]; 7644 /* If no drive, skip */ 7645 if ((drvp->drive_flags & DRIVE) == 0) 7646 continue; 7647 unit = drive + 2 * channel; 7648 /* add timing values, setup DMA if needed */ 7649 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 7650 pio_mode |= drvp->PIO_mode << (4 * unit + 16); 7651 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7652 (drvp->drive_flags & DRIVE_UDMA)) { 7653 /* use Ultra/DMA, check for 80-pin cable */ 7654 if (sc->sc_rev <= 0x92 && drvp->UDMA_mode > 2 && 7655 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 7656 PCI_SUBSYS_ID_REG)) & 7657 (1 << (14 + channel))) == 0) { 7658 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 7659 "cable not detected\n", drvp->drive_name, 7660 sc->sc_wdcdev.sc_dev.dv_xname, 7661 channel, drive), DEBUG_PROBE); 7662 drvp->UDMA_mode = 2; 7663 } 7664 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7665 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 7666 udma_mode |= 1 << unit; 7667 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7668 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7669 (drvp->drive_flags & DRIVE_DMA)) { 7670 /* use Multiword DMA */ 7671 drvp->drive_flags &= ~DRIVE_UDMA; 7672 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7673 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7674 } else { 7675 /* PIO only */ 7676 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7677 } 7678 } 7679 7680 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 7681 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 7682 if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE) 7683 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 7684 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 
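	/*
	 * Note on the register layout assumed by the shifts above (inferred
	 * from this code, not from a ServerWorks datasheet): 0x40 and 0x44
	 * hold one PIO/MWDMA timing byte per drive, with the two drives of
	 * a channel stored in swapped order (hence "unit ^ 1"); 0x48 holds
	 * the per-drive PIO mode nibbles and 0x54 the UDMA mode nibbles plus
	 * per-drive UDMA enable bits.  0x48 is skipped on the OSB4, which is
	 * limited to UDMA mode 2 above.
	 */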
7685 7686 if (idedma_ctl != 0) { 7687 /* Add software bits in status register */ 7688 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7689 IDEDMA_CTL(channel), idedma_ctl); 7690 } 7691 pciide_print_modes(cp); 7692 } 7693 7694 int 7695 serverworks_pci_intr(void *arg) 7696 { 7697 struct pciide_softc *sc = arg; 7698 struct pciide_channel *cp; 7699 struct channel_softc *wdc_cp; 7700 int rv = 0; 7701 int dmastat, i, crv; 7702 7703 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7704 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7705 IDEDMA_CTL(i)); 7706 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 7707 IDEDMA_CTL_INTR) 7708 continue; 7709 cp = &sc->pciide_channels[i]; 7710 wdc_cp = &cp->wdc_channel; 7711 crv = wdcintr(wdc_cp); 7712 if (crv == 0) { 7713 printf("%s:%d: bogus intr\n", 7714 sc->sc_wdcdev.sc_dev.dv_xname, i); 7715 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7716 IDEDMA_CTL(i), dmastat); 7717 } else 7718 rv = 1; 7719 } 7720 return (rv); 7721 } 7722 7723 void 7724 svwsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7725 { 7726 struct pciide_channel *cp; 7727 pci_intr_handle_t intrhandle; 7728 const char *intrstr; 7729 int channel; 7730 struct pciide_svwsata *ss; 7731 7732 /* Allocate memory for private data */ 7733 sc->sc_cookielen = sizeof(*ss); 7734 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO); 7735 ss = sc->sc_cookie; 7736 7737 /* The 4-port version has a dummy second function. */ 7738 if (pci_conf_read(sc->sc_pc, sc->sc_tag, 7739 PCI_MAPREG_START + 0x14) == 0) { 7740 printf("\n"); 7741 return; 7742 } 7743 7744 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 7745 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 7746 &ss->ba5_st, &ss->ba5_sh, NULL, NULL, 0) != 0) { 7747 printf(": unable to map BA5 register space\n"); 7748 return; 7749 } 7750 7751 printf(": DMA"); 7752 svwsata_mapreg_dma(sc, pa); 7753 printf("\n"); 7754 7755 if (sc->sc_dma_ok) { 7756 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 7757 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7758 sc->sc_wdcdev.irqack = pciide_irqack; 7759 } 7760 sc->sc_wdcdev.PIO_cap = 4; 7761 sc->sc_wdcdev.DMA_cap = 2; 7762 sc->sc_wdcdev.UDMA_cap = 6; 7763 7764 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7765 sc->sc_wdcdev.nchannels = 4; 7766 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7767 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7768 sc->sc_wdcdev.set_modes = sata_setup_channel; 7769 7770 /* We can use SControl and SStatus to probe for drives. */ 7771 sc->sc_wdcdev.drv_probe = svwsata_drv_probe; 7772 7773 /* Map and establish the interrupt handler. */ 7774 if(pci_intr_map(pa, &intrhandle) != 0) { 7775 printf("%s: couldn't map native-PCI interrupt\n", 7776 sc->sc_wdcdev.sc_dev.dv_xname); 7777 return; 7778 } 7779 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 7780 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 7781 pciide_pci_intr, sc, sc->sc_wdcdev.sc_dev.dv_xname); 7782 if (sc->sc_pci_ih != NULL) { 7783 printf("%s: using %s for native-PCI interrupt\n", 7784 sc->sc_wdcdev.sc_dev.dv_xname, 7785 intrstr ? 
intrstr : "unknown interrupt"); 7786 } else { 7787 printf("%s: couldn't establish native-PCI interrupt", 7788 sc->sc_wdcdev.sc_dev.dv_xname); 7789 if (intrstr != NULL) 7790 printf(" at %s", intrstr); 7791 printf("\n"); 7792 return; 7793 } 7794 7795 switch (sc->sc_pp->ide_product) { 7796 case PCI_PRODUCT_RCC_K2_SATA: 7797 bus_space_write_4(ss->ba5_st, ss->ba5_sh, SVWSATA_SICR1, 7798 bus_space_read_4(ss->ba5_st, ss->ba5_sh, SVWSATA_SICR1) 7799 & ~0x00040000); 7800 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7801 SVWSATA_SIM, 0); 7802 break; 7803 } 7804 7805 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7806 cp = &sc->pciide_channels[channel]; 7807 if (pciide_chansetup(sc, channel, 0) == 0) 7808 continue; 7809 svwsata_mapchan(cp); 7810 sata_setup_channel(&cp->wdc_channel); 7811 } 7812 } 7813 7814 void 7815 svwsata_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 7816 { 7817 struct pciide_svwsata *ss = sc->sc_cookie; 7818 7819 sc->sc_wdcdev.dma_arg = sc; 7820 sc->sc_wdcdev.dma_init = pciide_dma_init; 7821 sc->sc_wdcdev.dma_start = pciide_dma_start; 7822 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 7823 7824 /* XXX */ 7825 sc->sc_dma_iot = ss->ba5_st; 7826 sc->sc_dma_ioh = ss->ba5_sh; 7827 7828 sc->sc_dmacmd_read = svwsata_dmacmd_read; 7829 sc->sc_dmacmd_write = svwsata_dmacmd_write; 7830 sc->sc_dmactl_read = svwsata_dmactl_read; 7831 sc->sc_dmactl_write = svwsata_dmactl_write; 7832 sc->sc_dmatbl_write = svwsata_dmatbl_write; 7833 7834 /* DMA registers all set up! */ 7835 sc->sc_dmat = pa->pa_dmat; 7836 sc->sc_dma_ok = 1; 7837 } 7838 7839 u_int8_t 7840 svwsata_dmacmd_read(struct pciide_softc *sc, int chan) 7841 { 7842 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7843 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0))); 7844 } 7845 7846 void 7847 svwsata_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 7848 { 7849 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7850 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0), val); 7851 } 7852 7853 u_int8_t 7854 svwsata_dmactl_read(struct pciide_softc *sc, int chan) 7855 { 7856 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7857 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0))); 7858 } 7859 7860 void 7861 svwsata_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 7862 { 7863 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7864 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0), val); 7865 } 7866 7867 void 7868 svwsata_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 7869 { 7870 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 7871 (chan << 8) + SVWSATA_DMA + IDEDMA_TBL(0), val); 7872 } 7873 7874 void 7875 svwsata_mapchan(struct pciide_channel *cp) 7876 { 7877 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7878 struct channel_softc *wdc_cp = &cp->wdc_channel; 7879 struct pciide_svwsata *ss = sc->sc_cookie; 7880 7881 cp->compat = 0; 7882 cp->ih = sc->sc_pci_ih; 7883 7884 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7885 (wdc_cp->channel << 8) + SVWSATA_TF0, 7886 SVWSATA_TF8 - SVWSATA_TF0, &wdc_cp->cmd_ioh) != 0) { 7887 printf("%s: couldn't map %s cmd regs\n", 7888 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7889 return; 7890 } 7891 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7892 (wdc_cp->channel << 8) + SVWSATA_TF8, 4, 7893 &wdc_cp->ctl_ioh) != 0) { 7894 printf("%s: couldn't map %s ctl regs\n", 7895 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7896 return; 7897 } 7898 wdc_cp->cmd_iot = wdc_cp->ctl_iot = ss->ba5_st; 7899 wdc_cp->_vtbl = &wdc_svwsata_vtbl; 7900 
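	/*
	 * The task-file registers in BA5 are laid out on 32-bit boundaries,
	 * so wdc_svwsata_vtbl routes register accesses through
	 * svwsata_read_reg()/svwsata_write_reg() below, which scale the
	 * register index by 4 and use 32-bit bus_space accesses instead of
	 * the generic byte-wide ones.
	 */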
wdc_cp->ch_flags |= WDCF_DMA_BEFORE_CMD; 7901 wdcattach(wdc_cp); 7902 } 7903 7904 void 7905 svwsata_drv_probe(struct channel_softc *chp) 7906 { 7907 struct pciide_channel *cp = (struct pciide_channel *)chp; 7908 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7909 struct pciide_svwsata *ss = sc->sc_cookie; 7910 int channel = chp->channel; 7911 uint32_t scontrol, sstatus; 7912 uint8_t scnt, sn, cl, ch; 7913 int s; 7914 7915 /* 7916 * Request communication initialization sequence, any speed. 7917 * Performing this is the equivalent of an ATA Reset. 7918 */ 7919 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 7920 7921 /* 7922 * XXX We don't yet support SATA power management; disable all 7923 * power management state transitions. 7924 */ 7925 scontrol |= SControl_IPM_NONE; 7926 7927 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7928 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7929 delay(50 * 1000); 7930 scontrol &= ~SControl_DET_INIT; 7931 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7932 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7933 delay(100 * 1000); 7934 7935 sstatus = bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7936 (channel << 8) + SVWSATA_SSTATUS); 7937 #if 0 7938 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 7939 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 7940 bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7941 (channel << 8) + SVWSATA_SSTATUS)); 7942 #endif 7943 switch (sstatus & SStatus_DET_mask) { 7944 case SStatus_DET_NODEV: 7945 /* No device; be silent. */ 7946 break; 7947 7948 case SStatus_DET_DEV_NE: 7949 printf("%s: port %d: device connected, but " 7950 "communication not established\n", 7951 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7952 break; 7953 7954 case SStatus_DET_OFFLINE: 7955 printf("%s: port %d: PHY offline\n", 7956 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7957 break; 7958 7959 case SStatus_DET_DEV: 7960 /* 7961 * XXX ATAPI detection doesn't currently work. Don't 7962 * XXX know why. But, it's not like the standard method 7963 * XXX can detect an ATAPI device connected via a SATA/PATA 7964 * XXX bridge, so at least this is no worse. --thorpej 7965 */ 7966 if (chp->_vtbl != NULL) 7967 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 7968 else 7969 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 7970 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 7971 delay(10); /* 400ns delay */ 7972 /* Save register contents. */ 7973 if (chp->_vtbl != NULL) { 7974 scnt = CHP_READ_REG(chp, wdr_seccnt); 7975 sn = CHP_READ_REG(chp, wdr_sector); 7976 cl = CHP_READ_REG(chp, wdr_cyl_lo); 7977 ch = CHP_READ_REG(chp, wdr_cyl_hi); 7978 } else { 7979 scnt = bus_space_read_1(chp->cmd_iot, 7980 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 7981 sn = bus_space_read_1(chp->cmd_iot, 7982 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 7983 cl = bus_space_read_1(chp->cmd_iot, 7984 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 7985 ch = bus_space_read_1(chp->cmd_iot, 7986 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 7987 } 7988 #if 0 7989 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7990 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7991 scnt, sn, cl, ch); 7992 #endif 7993 /* 7994 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7995 * cases we get wrong values here, so ignore it. 
7996 */ 7997 s = splbio(); 7998 if (cl == 0x14 && ch == 0xeb) 7999 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 8000 else 8001 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 8002 splx(s); 8003 8004 printf("%s: port %d", 8005 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 8006 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 8007 case 1: 8008 printf(": 1.5Gb/s"); 8009 break; 8010 case 2: 8011 printf(": 3.0Gb/s"); 8012 break; 8013 } 8014 printf("\n"); 8015 break; 8016 8017 default: 8018 printf("%s: port %d: unknown SStatus: 0x%08x\n", 8019 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 8020 } 8021 } 8022 8023 u_int8_t 8024 svwsata_read_reg(struct channel_softc *chp, enum wdc_regs reg) 8025 { 8026 if (reg & _WDC_AUX) { 8027 return (bus_space_read_4(chp->ctl_iot, chp->ctl_ioh, 8028 (reg & _WDC_REGMASK) << 2)); 8029 } else { 8030 return (bus_space_read_4(chp->cmd_iot, chp->cmd_ioh, 8031 (reg & _WDC_REGMASK) << 2)); 8032 } 8033 } 8034 8035 void 8036 svwsata_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 8037 { 8038 if (reg & _WDC_AUX) { 8039 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 8040 (reg & _WDC_REGMASK) << 2, val); 8041 } else { 8042 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 8043 (reg & _WDC_REGMASK) << 2, val); 8044 } 8045 } 8046 8047 void 8048 svwsata_lba48_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int16_t val) 8049 { 8050 if (reg & _WDC_AUX) { 8051 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 8052 (reg & _WDC_REGMASK) << 2, val); 8053 } else { 8054 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 8055 (reg & _WDC_REGMASK) << 2, val); 8056 } 8057 } 8058 8059 #define ACARD_IS_850(sc) \ 8060 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 8061 8062 void 8063 acard_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8064 { 8065 struct pciide_channel *cp; 8066 int i; 8067 pcireg_t interface; 8068 bus_size_t cmdsize, ctlsize; 8069 8070 /* 8071 * when the chip is in native mode it identifies itself as a 8072 * 'misc mass storage'. Fake interface in this case. 
8073 */ 8074 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 8075 interface = PCI_INTERFACE(pa->pa_class); 8076 } else { 8077 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8078 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8079 } 8080 8081 printf(": DMA"); 8082 pciide_mapreg_dma(sc, pa); 8083 printf("\n"); 8084 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8085 WDC_CAPABILITY_MODE; 8086 8087 if (sc->sc_dma_ok) { 8088 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8089 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8090 sc->sc_wdcdev.irqack = pciide_irqack; 8091 } 8092 sc->sc_wdcdev.PIO_cap = 4; 8093 sc->sc_wdcdev.DMA_cap = 2; 8094 switch (sc->sc_pp->ide_product) { 8095 case PCI_PRODUCT_ACARD_ATP850U: 8096 sc->sc_wdcdev.UDMA_cap = 2; 8097 break; 8098 case PCI_PRODUCT_ACARD_ATP860: 8099 case PCI_PRODUCT_ACARD_ATP860A: 8100 sc->sc_wdcdev.UDMA_cap = 4; 8101 break; 8102 case PCI_PRODUCT_ACARD_ATP865A: 8103 case PCI_PRODUCT_ACARD_ATP865R: 8104 sc->sc_wdcdev.UDMA_cap = 6; 8105 break; 8106 } 8107 8108 sc->sc_wdcdev.set_modes = acard_setup_channel; 8109 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8110 sc->sc_wdcdev.nchannels = 2; 8111 8112 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 8113 cp = &sc->pciide_channels[i]; 8114 if (pciide_chansetup(sc, i, interface) == 0) 8115 continue; 8116 if (interface & PCIIDE_INTERFACE_PCI(i)) { 8117 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 8118 &ctlsize, pciide_pci_intr); 8119 } else { 8120 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 8121 &cmdsize, &ctlsize); 8122 } 8123 if (cp->hw_ok == 0) 8124 return; 8125 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 8126 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 8127 wdcattach(&cp->wdc_channel); 8128 acard_setup_channel(&cp->wdc_channel); 8129 } 8130 if (!ACARD_IS_850(sc)) { 8131 u_int32_t reg; 8132 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 8133 reg &= ~ATP860_CTRL_INT; 8134 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 8135 } 8136 } 8137 8138 void 8139 acard_setup_channel(struct channel_softc *chp) 8140 { 8141 struct ata_drive_datas *drvp; 8142 struct pciide_channel *cp = (struct pciide_channel *)chp; 8143 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8144 int channel = chp->channel; 8145 int drive; 8146 u_int32_t idetime, udma_mode; 8147 u_int32_t idedma_ctl; 8148 8149 /* setup DMA if needed */ 8150 pciide_channel_dma_setup(cp); 8151 8152 if (ACARD_IS_850(sc)) { 8153 idetime = 0; 8154 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 8155 udma_mode &= ~ATP850_UDMA_MASK(channel); 8156 } else { 8157 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 8158 idetime &= ~ATP860_SETTIME_MASK(channel); 8159 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 8160 udma_mode &= ~ATP860_UDMA_MASK(channel); 8161 } 8162 8163 idedma_ctl = 0; 8164 8165 /* Per drive settings */ 8166 for (drive = 0; drive < 2; drive++) { 8167 drvp = &chp->ch_drive[drive]; 8168 /* If no drive, skip */ 8169 if ((drvp->drive_flags & DRIVE) == 0) 8170 continue; 8171 /* add timing values, setup DMA if needed */ 8172 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 8173 (drvp->drive_flags & DRIVE_UDMA)) { 8174 /* use Ultra/DMA */ 8175 if (ACARD_IS_850(sc)) { 8176 idetime |= ATP850_SETTIME(drive, 8177 acard_act_udma[drvp->UDMA_mode], 8178 acard_rec_udma[drvp->UDMA_mode]); 8179 udma_mode |= ATP850_UDMA_MODE(channel, drive, 8180 acard_udma_conf[drvp->UDMA_mode]); 8181 } else { 8182 idetime |= 
ATP860_SETTIME(channel, drive, 8183 acard_act_udma[drvp->UDMA_mode], 8184 acard_rec_udma[drvp->UDMA_mode]); 8185 udma_mode |= ATP860_UDMA_MODE(channel, drive, 8186 acard_udma_conf[drvp->UDMA_mode]); 8187 } 8188 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8189 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 8190 (drvp->drive_flags & DRIVE_DMA)) { 8191 /* use Multiword DMA */ 8192 drvp->drive_flags &= ~DRIVE_UDMA; 8193 if (ACARD_IS_850(sc)) { 8194 idetime |= ATP850_SETTIME(drive, 8195 acard_act_dma[drvp->DMA_mode], 8196 acard_rec_dma[drvp->DMA_mode]); 8197 } else { 8198 idetime |= ATP860_SETTIME(channel, drive, 8199 acard_act_dma[drvp->DMA_mode], 8200 acard_rec_dma[drvp->DMA_mode]); 8201 } 8202 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8203 } else { 8204 /* PIO only */ 8205 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 8206 if (ACARD_IS_850(sc)) { 8207 idetime |= ATP850_SETTIME(drive, 8208 acard_act_pio[drvp->PIO_mode], 8209 acard_rec_pio[drvp->PIO_mode]); 8210 } else { 8211 idetime |= ATP860_SETTIME(channel, drive, 8212 acard_act_pio[drvp->PIO_mode], 8213 acard_rec_pio[drvp->PIO_mode]); 8214 } 8215 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 8216 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 8217 | ATP8x0_CTRL_EN(channel)); 8218 } 8219 } 8220 8221 if (idedma_ctl != 0) { 8222 /* Add software bits in status register */ 8223 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8224 IDEDMA_CTL(channel), idedma_ctl); 8225 } 8226 pciide_print_modes(cp); 8227 8228 if (ACARD_IS_850(sc)) { 8229 pci_conf_write(sc->sc_pc, sc->sc_tag, 8230 ATP850_IDETIME(channel), idetime); 8231 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 8232 } else { 8233 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 8234 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 8235 } 8236 } 8237 8238 void 8239 nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8240 { 8241 struct pciide_channel *cp; 8242 int channel; 8243 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8244 bus_size_t cmdsize, ctlsize; 8245 u_int32_t conf; 8246 8247 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8248 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8249 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8250 8251 printf(": DMA"); 8252 pciide_mapreg_dma(sc, pa); 8253 8254 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8255 WDC_CAPABILITY_MODE; 8256 if (sc->sc_dma_ok) { 8257 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8258 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8259 sc->sc_wdcdev.irqack = pciide_irqack; 8260 } 8261 sc->sc_wdcdev.PIO_cap = 4; 8262 sc->sc_wdcdev.DMA_cap = 2; 8263 switch (sc->sc_pp->ide_product) { 8264 case PCI_PRODUCT_NVIDIA_NFORCE_IDE: 8265 sc->sc_wdcdev.UDMA_cap = 5; 8266 break; 8267 default: 8268 sc->sc_wdcdev.UDMA_cap = 6; 8269 } 8270 sc->sc_wdcdev.set_modes = nforce_setup_channel; 8271 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8272 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8273 8274 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8275 8276 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8277 cp = &sc->pciide_channels[channel]; 8278 8279 if (pciide_chansetup(sc, channel, interface) == 0) 8280 continue; 8281 8282 if ((conf & NFORCE_CHAN_EN(channel)) == 0) { 8283 printf("%s: %s ignored (disabled)\n", 8284 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8285 cp->hw_ok = 0; 8286 continue; 8287 } 8288 8289 pciide_map_compat_intr(pa, cp, channel, interface); 8290 if (cp->hw_ok == 0) 8291 
continue; 8292 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8293 nforce_pci_intr); 8294 if (cp->hw_ok == 0) { 8295 pciide_unmap_compat_intr(pa, cp, channel, interface); 8296 continue; 8297 } 8298 8299 if (pciide_chan_candisable(cp)) { 8300 conf &= ~NFORCE_CHAN_EN(channel); 8301 pciide_unmap_compat_intr(pa, cp, channel, interface); 8302 continue; 8303 } 8304 8305 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8306 } 8307 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8308 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8309 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 8310 } 8311 8312 void 8313 nforce_setup_channel(struct channel_softc *chp) 8314 { 8315 struct ata_drive_datas *drvp; 8316 int drive, mode; 8317 u_int32_t idedma_ctl; 8318 struct pciide_channel *cp = (struct pciide_channel *)chp; 8319 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8320 int channel = chp->channel; 8321 u_int32_t conf, piodmatim, piotim, udmatim; 8322 8323 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8324 piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM); 8325 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM); 8326 udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM); 8327 WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, " 8328 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8329 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8330 8331 /* Setup DMA if needed */ 8332 pciide_channel_dma_setup(cp); 8333 8334 /* Clear all bits for this channel */ 8335 idedma_ctl = 0; 8336 piodmatim &= ~NFORCE_PIODMATIM_MASK(channel); 8337 udmatim &= ~NFORCE_UDMATIM_MASK(channel); 8338 8339 /* Per channel settings */ 8340 for (drive = 0; drive < 2; drive++) { 8341 drvp = &chp->ch_drive[drive]; 8342 8343 /* If no drive, skip */ 8344 if ((drvp->drive_flags & DRIVE) == 0) 8345 continue; 8346 8347 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8348 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8349 /* Setup UltraDMA mode */ 8350 drvp->drive_flags &= ~DRIVE_DMA; 8351 8352 udmatim |= NFORCE_UDMATIM_SET(channel, drive, 8353 nforce_udma[drvp->UDMA_mode]) | 8354 NFORCE_UDMA_EN(channel, drive) | 8355 NFORCE_UDMA_ENM(channel, drive); 8356 8357 mode = drvp->PIO_mode; 8358 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8359 (drvp->drive_flags & DRIVE_DMA) != 0) { 8360 /* Setup multiword DMA mode */ 8361 drvp->drive_flags &= ~DRIVE_UDMA; 8362 8363 /* mode = min(pio, dma + 2) */ 8364 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8365 mode = drvp->PIO_mode; 8366 else 8367 mode = drvp->DMA_mode + 2; 8368 } else { 8369 mode = drvp->PIO_mode; 8370 goto pio; 8371 } 8372 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8373 8374 pio: 8375 /* Setup PIO mode */ 8376 if (mode <= 2) { 8377 drvp->DMA_mode = 0; 8378 drvp->PIO_mode = 0; 8379 mode = 0; 8380 } else { 8381 drvp->PIO_mode = mode; 8382 drvp->DMA_mode = mode - 2; 8383 } 8384 piodmatim |= NFORCE_PIODMATIM_SET(channel, drive, 8385 nforce_pio[mode]); 8386 } 8387 8388 if (idedma_ctl != 0) { 8389 /* Add software bits in status register */ 8390 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8391 IDEDMA_CTL(channel), idedma_ctl); 8392 } 8393 8394 WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, " 8395 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8396 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8397 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim); 8398 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim); 8399 8400 
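	/*
	 * NFORCE_PIOTIM is only read for the WDCDEBUG printouts above; the
	 * timings actually programmed here live in NFORCE_PIODMATIM and
	 * NFORCE_UDMATIM, so only those two registers are written back.
	 */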
pciide_print_modes(cp); 8401 } 8402 8403 int 8404 nforce_pci_intr(void *arg) 8405 { 8406 struct pciide_softc *sc = arg; 8407 struct pciide_channel *cp; 8408 struct channel_softc *wdc_cp; 8409 int i, rv, crv; 8410 u_int32_t dmastat; 8411 8412 rv = 0; 8413 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 8414 cp = &sc->pciide_channels[i]; 8415 wdc_cp = &cp->wdc_channel; 8416 8417 /* Skip compat channel */ 8418 if (cp->compat) 8419 continue; 8420 8421 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8422 IDEDMA_CTL(i)); 8423 if ((dmastat & IDEDMA_CTL_INTR) == 0) 8424 continue; 8425 8426 crv = wdcintr(wdc_cp); 8427 if (crv == 0) 8428 printf("%s:%d: bogus intr\n", 8429 sc->sc_wdcdev.sc_dev.dv_xname, i); 8430 else 8431 rv = 1; 8432 } 8433 return (rv); 8434 } 8435 8436 void 8437 artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8438 { 8439 struct pciide_channel *cp; 8440 bus_size_t cmdsize, ctlsize; 8441 pcireg_t interface; 8442 int channel; 8443 8444 printf(": DMA"); 8445 #ifdef PCIIDE_I31244_DISABLEDMA 8446 if (sc->sc_rev == 0) { 8447 printf(" disabled due to rev. 0"); 8448 sc->sc_dma_ok = 0; 8449 } else 8450 #endif 8451 pciide_mapreg_dma(sc, pa); 8452 printf("\n"); 8453 8454 /* 8455 * XXX Configure LEDs to show activity. 8456 */ 8457 8458 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8459 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 8460 sc->sc_wdcdev.PIO_cap = 4; 8461 if (sc->sc_dma_ok) { 8462 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8463 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8464 sc->sc_wdcdev.irqack = pciide_irqack; 8465 sc->sc_wdcdev.DMA_cap = 2; 8466 sc->sc_wdcdev.UDMA_cap = 6; 8467 } 8468 sc->sc_wdcdev.set_modes = sata_setup_channel; 8469 8470 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8471 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8472 8473 interface = PCI_INTERFACE(pa->pa_class); 8474 8475 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8476 cp = &sc->pciide_channels[channel]; 8477 if (pciide_chansetup(sc, channel, interface) == 0) 8478 continue; 8479 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8480 pciide_pci_intr); 8481 if (cp->hw_ok == 0) 8482 continue; 8483 pciide_map_compat_intr(pa, cp, channel, interface); 8484 sata_setup_channel(&cp->wdc_channel); 8485 } 8486 } 8487 8488 void 8489 ite_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8490 { 8491 struct pciide_channel *cp; 8492 int channel; 8493 pcireg_t interface; 8494 bus_size_t cmdsize, ctlsize; 8495 pcireg_t cfg, modectl; 8496 8497 /* 8498 * Fake interface since IT8212F is claimed to be a ``RAID'' device. 
8499 */ 8500 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8501 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8502 8503 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8504 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8505 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8506 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8507 modectl & IT_MODE_MASK), DEBUG_PROBE); 8508 8509 printf(": DMA"); 8510 pciide_mapreg_dma(sc, pa); 8511 8512 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8513 WDC_CAPABILITY_MODE; 8514 if (sc->sc_dma_ok) { 8515 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8516 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8517 sc->sc_wdcdev.irqack = pciide_irqack; 8518 } 8519 sc->sc_wdcdev.PIO_cap = 4; 8520 sc->sc_wdcdev.DMA_cap = 2; 8521 sc->sc_wdcdev.UDMA_cap = 6; 8522 8523 sc->sc_wdcdev.set_modes = ite_setup_channel; 8524 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8525 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8526 8527 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8528 8529 /* Disable RAID */ 8530 modectl &= ~IT_MODE_RAID1; 8531 /* Disable CPU firmware mode */ 8532 modectl &= ~IT_MODE_CPU; 8533 8534 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8535 8536 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8537 cp = &sc->pciide_channels[channel]; 8538 8539 if (pciide_chansetup(sc, channel, interface) == 0) 8540 continue; 8541 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8542 pciide_pci_intr); 8543 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8544 } 8545 8546 /* Re-read configuration registers after channels setup */ 8547 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8548 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8549 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8550 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8551 modectl & IT_MODE_MASK), DEBUG_PROBE); 8552 } 8553 8554 void 8555 ite_setup_channel(struct channel_softc *chp) 8556 { 8557 struct ata_drive_datas *drvp; 8558 int drive, mode; 8559 u_int32_t idedma_ctl; 8560 struct pciide_channel *cp = (struct pciide_channel *)chp; 8561 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8562 int channel = chp->channel; 8563 pcireg_t cfg, modectl; 8564 pcireg_t tim; 8565 8566 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8567 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8568 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_TIM(channel)); 8569 WDCDEBUG_PRINT(("%s:%d: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8570 channel, tim), DEBUG_PROBE); 8571 8572 /* Setup DMA if needed */ 8573 pciide_channel_dma_setup(cp); 8574 8575 /* Clear all bits for this channel */ 8576 idedma_ctl = 0; 8577 8578 /* Per channel settings */ 8579 for (drive = 0; drive < 2; drive++) { 8580 drvp = &chp->ch_drive[drive]; 8581 8582 /* If no drive, skip */ 8583 if ((drvp->drive_flags & DRIVE) == 0) 8584 continue; 8585 8586 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8587 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8588 /* Setup UltraDMA mode */ 8589 drvp->drive_flags &= ~DRIVE_DMA; 8590 modectl &= ~IT_MODE_DMA(channel, drive); 8591 8592 #if 0 8593 /* Check cable, works only in CPU firmware mode */ 8594 if (drvp->UDMA_mode > 2 && 8595 (cfg & IT_CFG_CABLE(channel, drive)) == 0) { 8596 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 8597 "80-wire cable not detected\n", 8598 drvp->drive_name, 8599 sc->sc_wdcdev.sc_dev.dv_xname, 8600 channel, drive), DEBUG_PROBE); 8601 drvp->UDMA_mode = 2; 8602 } 8603 #endif 8604 
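			/*
			 * With the cable check above disabled, the only
			 * per-mode UDMA tweak left is IT_TIM_UDMA5(), which
			 * (judging by the macro name) selects the timing set
			 * used for UDMA modes 5 and above.
			 */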
8605 if (drvp->UDMA_mode >= 5) 8606 tim |= IT_TIM_UDMA5(drive); 8607 else 8608 tim &= ~IT_TIM_UDMA5(drive); 8609 8610 mode = drvp->PIO_mode; 8611 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8612 (drvp->drive_flags & DRIVE_DMA) != 0) { 8613 /* Setup multiword DMA mode */ 8614 drvp->drive_flags &= ~DRIVE_UDMA; 8615 modectl |= IT_MODE_DMA(channel, drive); 8616 8617 /* mode = min(pio, dma + 2) */ 8618 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8619 mode = drvp->PIO_mode; 8620 else 8621 mode = drvp->DMA_mode + 2; 8622 } else { 8623 mode = drvp->PIO_mode; 8624 goto pio; 8625 } 8626 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8627 8628 pio: 8629 /* Setup PIO mode */ 8630 if (mode <= 2) { 8631 drvp->DMA_mode = 0; 8632 drvp->PIO_mode = 0; 8633 mode = 0; 8634 } else { 8635 drvp->PIO_mode = mode; 8636 drvp->DMA_mode = mode - 2; 8637 } 8638 8639 /* Enable IORDY if PIO mode >= 3 */ 8640 if (drvp->PIO_mode >= 3) 8641 cfg |= IT_CFG_IORDY(channel); 8642 } 8643 8644 WDCDEBUG_PRINT(("%s: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8645 tim), DEBUG_PROBE); 8646 8647 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_CFG, cfg); 8648 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8649 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_TIM(channel), tim); 8650 8651 if (idedma_ctl != 0) { 8652 /* Add software bits in status register */ 8653 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8654 IDEDMA_CTL(channel), idedma_ctl); 8655 } 8656 8657 pciide_print_modes(cp); 8658 } 8659 8660 void 8661 ixp_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8662 { 8663 struct pciide_channel *cp; 8664 int channel; 8665 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8666 bus_size_t cmdsize, ctlsize; 8667 8668 printf(": DMA"); 8669 pciide_mapreg_dma(sc, pa); 8670 8671 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8672 WDC_CAPABILITY_MODE; 8673 if (sc->sc_dma_ok) { 8674 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8675 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8676 sc->sc_wdcdev.irqack = pciide_irqack; 8677 } 8678 sc->sc_wdcdev.PIO_cap = 4; 8679 sc->sc_wdcdev.DMA_cap = 2; 8680 sc->sc_wdcdev.UDMA_cap = 6; 8681 8682 sc->sc_wdcdev.set_modes = ixp_setup_channel; 8683 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8684 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8685 8686 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8687 8688 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8689 cp = &sc->pciide_channels[channel]; 8690 if (pciide_chansetup(sc, channel, interface) == 0) 8691 continue; 8692 pciide_map_compat_intr(pa, cp, channel, interface); 8693 if (cp->hw_ok == 0) 8694 continue; 8695 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8696 pciide_pci_intr); 8697 if (cp->hw_ok == 0) { 8698 pciide_unmap_compat_intr(pa, cp, channel, interface); 8699 continue; 8700 } 8701 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8702 } 8703 } 8704 8705 void 8706 ixp_setup_channel(struct channel_softc *chp) 8707 { 8708 struct ata_drive_datas *drvp; 8709 int drive, mode; 8710 u_int32_t idedma_ctl; 8711 struct pciide_channel *cp = (struct pciide_channel*)chp; 8712 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8713 int channel = chp->channel; 8714 pcireg_t udma, mdma_timing, pio, pio_timing; 8715 8716 pio_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING); 8717 pio = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL); 8718 mdma_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING); 8719 udma = pci_conf_read(sc->sc_pc, 
sc->sc_tag, IXP_UDMA_CTL); 8720 8721 /* Setup DMA if needed */ 8722 pciide_channel_dma_setup(cp); 8723 8724 idedma_ctl = 0; 8725 8726 /* Per channel settings */ 8727 for (drive = 0; drive < 2; drive++) { 8728 drvp = &chp->ch_drive[drive]; 8729 8730 /* If no drive, skip */ 8731 if ((drvp->drive_flags & DRIVE) == 0) 8732 continue; 8733 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8734 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8735 /* Setup UltraDMA mode */ 8736 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8737 IXP_UDMA_ENABLE(udma, chp->channel, drive); 8738 IXP_SET_MODE(udma, chp->channel, drive, 8739 drvp->UDMA_mode); 8740 mode = drvp->PIO_mode; 8741 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8742 (drvp->drive_flags & DRIVE_DMA) != 0) { 8743 /* Setup multiword DMA mode */ 8744 drvp->drive_flags &= ~DRIVE_UDMA; 8745 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8746 IXP_UDMA_DISABLE(udma, chp->channel, drive); 8747 IXP_SET_TIMING(mdma_timing, chp->channel, drive, 8748 ixp_mdma_timings[drvp->DMA_mode]); 8749 8750 /* mode = min(pio, dma + 2) */ 8751 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8752 mode = drvp->PIO_mode; 8753 else 8754 mode = drvp->DMA_mode + 2; 8755 } else { 8756 mode = drvp->PIO_mode; 8757 } 8758 8759 /* Setup PIO mode */ 8760 drvp->PIO_mode = mode; 8761 if (mode < 2) 8762 drvp->DMA_mode = 0; 8763 else 8764 drvp->DMA_mode = mode - 2; 8765 /* 8766 * Set PIO mode and timings 8767 * Linux driver avoids PIO mode 1, let's do it too. 8768 */ 8769 if (drvp->PIO_mode == 1) 8770 drvp->PIO_mode = 0; 8771 8772 IXP_SET_MODE(pio, chp->channel, drive, drvp->PIO_mode); 8773 IXP_SET_TIMING(pio_timing, chp->channel, drive, 8774 ixp_pio_timings[drvp->PIO_mode]); 8775 } 8776 8777 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL, udma); 8778 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING, mdma_timing); 8779 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL, pio); 8780 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING, pio_timing); 8781 8782 if (idedma_ctl != 0) { 8783 /* Add software bits in status register */ 8784 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8785 IDEDMA_CTL(channel), idedma_ctl); 8786 } 8787 8788 pciide_print_modes(cp); 8789 } 8790 8791 void 8792 jmicron_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8793 { 8794 struct pciide_channel *cp; 8795 int channel; 8796 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8797 bus_size_t cmdsize, ctlsize; 8798 u_int32_t conf; 8799 8800 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8801 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8802 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8803 8804 printf(": DMA"); 8805 pciide_mapreg_dma(sc, pa); 8806 8807 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8808 WDC_CAPABILITY_MODE; 8809 if (sc->sc_dma_ok) { 8810 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8811 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8812 sc->sc_wdcdev.irqack = pciide_irqack; 8813 } 8814 sc->sc_wdcdev.PIO_cap = 4; 8815 sc->sc_wdcdev.DMA_cap = 2; 8816 sc->sc_wdcdev.UDMA_cap = 6; 8817 sc->sc_wdcdev.set_modes = jmicron_setup_channel; 8818 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8819 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8820 8821 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8822 8823 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8824 cp = &sc->pciide_channels[channel]; 8825 8826 if (pciide_chansetup(sc, channel, interface) == 0) 8827 continue; 8828 8829 #if 0 8830 if ((conf & 
JMICRON_CHAN_EN(channel)) == 0) { 8831 printf("%s: %s ignored (disabled)\n", 8832 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8833 cp->hw_ok = 0; 8834 continue; 8835 } 8836 #endif 8837 8838 pciide_map_compat_intr(pa, cp, channel, interface); 8839 if (cp->hw_ok == 0) 8840 continue; 8841 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8842 pciide_pci_intr); 8843 if (cp->hw_ok == 0) { 8844 pciide_unmap_compat_intr(pa, cp, channel, interface); 8845 continue; 8846 } 8847 8848 if (pciide_chan_candisable(cp)) { 8849 conf &= ~JMICRON_CHAN_EN(channel); 8850 pciide_unmap_compat_intr(pa, cp, channel, interface); 8851 continue; 8852 } 8853 8854 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8855 } 8856 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8857 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8858 pci_conf_write(sc->sc_pc, sc->sc_tag, JMICRON_CONF, conf); 8859 } 8860 8861 void 8862 jmicron_setup_channel(struct channel_softc *chp) 8863 { 8864 struct ata_drive_datas *drvp; 8865 int drive, mode; 8866 u_int32_t idedma_ctl; 8867 struct pciide_channel *cp = (struct pciide_channel *)chp; 8868 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8869 int channel = chp->channel; 8870 u_int32_t conf; 8871 8872 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8873 8874 /* Setup DMA if needed */ 8875 pciide_channel_dma_setup(cp); 8876 8877 /* Clear all bits for this channel */ 8878 idedma_ctl = 0; 8879 8880 /* Per channel settings */ 8881 for (drive = 0; drive < 2; drive++) { 8882 drvp = &chp->ch_drive[drive]; 8883 8884 /* If no drive, skip */ 8885 if ((drvp->drive_flags & DRIVE) == 0) 8886 continue; 8887 8888 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8889 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8890 /* Setup UltraDMA mode */ 8891 drvp->drive_flags &= ~DRIVE_DMA; 8892 8893 /* see if cable is up to scratch */ 8894 if ((conf & JMICRON_CONF_40PIN) && 8895 (drvp->UDMA_mode > 2)) 8896 drvp->UDMA_mode = 2; 8897 8898 mode = drvp->PIO_mode; 8899 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8900 (drvp->drive_flags & DRIVE_DMA) != 0) { 8901 /* Setup multiword DMA mode */ 8902 drvp->drive_flags &= ~DRIVE_UDMA; 8903 8904 /* mode = min(pio, dma + 2) */ 8905 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8906 mode = drvp->PIO_mode; 8907 else 8908 mode = drvp->DMA_mode + 2; 8909 } else { 8910 mode = drvp->PIO_mode; 8911 goto pio; 8912 } 8913 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8914 8915 pio: 8916 /* Setup PIO mode */ 8917 if (mode <= 2) { 8918 drvp->DMA_mode = 0; 8919 drvp->PIO_mode = 0; 8920 } else { 8921 drvp->PIO_mode = mode; 8922 drvp->DMA_mode = mode - 2; 8923 } 8924 } 8925 8926 if (idedma_ctl != 0) { 8927 /* Add software bits in status register */ 8928 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8929 IDEDMA_CTL(channel), idedma_ctl); 8930 } 8931 8932 pciide_print_modes(cp); 8933 } 8934 8935 void 8936 phison_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8937 { 8938 struct pciide_channel *cp; 8939 int channel; 8940 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8941 bus_size_t cmdsize, ctlsize; 8942 8943 sc->chip_unmap = default_chip_unmap; 8944 8945 printf(": DMA"); 8946 pciide_mapreg_dma(sc, pa); 8947 8948 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8949 WDC_CAPABILITY_MODE; 8950 if (sc->sc_dma_ok) { 8951 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8952 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8953 sc->sc_wdcdev.irqack = pciide_irqack; 8954 } 8955 sc->sc_wdcdev.PIO_cap = 4; 8956 
sc->sc_wdcdev.DMA_cap = 2; 8957 sc->sc_wdcdev.UDMA_cap = 5; 8958 sc->sc_wdcdev.set_modes = phison_setup_channel; 8959 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8960 sc->sc_wdcdev.nchannels = 1; 8961 8962 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8963 8964 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8965 cp = &sc->pciide_channels[channel]; 8966 8967 if (pciide_chansetup(sc, channel, interface) == 0) 8968 continue; 8969 8970 pciide_map_compat_intr(pa, cp, channel, interface); 8971 if (cp->hw_ok == 0) 8972 continue; 8973 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8974 pciide_pci_intr); 8975 if (cp->hw_ok == 0) { 8976 pciide_unmap_compat_intr(pa, cp, channel, interface); 8977 continue; 8978 } 8979 8980 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8981 } 8982 } 8983 8984 void 8985 phison_setup_channel(struct channel_softc *chp) 8986 { 8987 struct ata_drive_datas *drvp; 8988 int drive, mode; 8989 u_int32_t idedma_ctl; 8990 struct pciide_channel *cp = (struct pciide_channel *)chp; 8991 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8992 int channel = chp->channel; 8993 8994 /* Setup DMA if needed */ 8995 pciide_channel_dma_setup(cp); 8996 8997 /* Clear all bits for this channel */ 8998 idedma_ctl = 0; 8999 9000 /* Per channel settings */ 9001 for (drive = 0; drive < 2; drive++) { 9002 drvp = &chp->ch_drive[drive]; 9003 9004 /* If no drive, skip */ 9005 if ((drvp->drive_flags & DRIVE) == 0) 9006 continue; 9007 9008 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 9009 (drvp->drive_flags & DRIVE_UDMA) != 0) { 9010 /* Setup UltraDMA mode */ 9011 drvp->drive_flags &= ~DRIVE_DMA; 9012 mode = drvp->PIO_mode; 9013 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 9014 (drvp->drive_flags & DRIVE_DMA) != 0) { 9015 /* Setup multiword DMA mode */ 9016 drvp->drive_flags &= ~DRIVE_UDMA; 9017 9018 /* mode = min(pio, dma + 2) */ 9019 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 9020 mode = drvp->PIO_mode; 9021 else 9022 mode = drvp->DMA_mode + 2; 9023 } else { 9024 mode = drvp->PIO_mode; 9025 goto pio; 9026 } 9027 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 9028 9029 pio: 9030 /* Setup PIO mode */ 9031 if (mode <= 2) { 9032 drvp->DMA_mode = 0; 9033 drvp->PIO_mode = 0; 9034 } else { 9035 drvp->PIO_mode = mode; 9036 drvp->DMA_mode = mode - 2; 9037 } 9038 } 9039 9040 if (idedma_ctl != 0) { 9041 /* Add software bits in status register */ 9042 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 9043 IDEDMA_CTL(channel), idedma_ctl); 9044 } 9045 9046 pciide_print_modes(cp); 9047 } 9048 9049 void 9050 sch_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 9051 { 9052 struct pciide_channel *cp; 9053 int channel; 9054 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 9055 bus_size_t cmdsize, ctlsize; 9056 9057 printf(": DMA"); 9058 pciide_mapreg_dma(sc, pa); 9059 9060 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 9061 WDC_CAPABILITY_MODE; 9062 if (sc->sc_dma_ok) { 9063 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 9064 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 9065 sc->sc_wdcdev.irqack = pciide_irqack; 9066 } 9067 sc->sc_wdcdev.PIO_cap = 4; 9068 sc->sc_wdcdev.DMA_cap = 2; 9069 sc->sc_wdcdev.UDMA_cap = 5; 9070 sc->sc_wdcdev.set_modes = sch_setup_channel; 9071 sc->sc_wdcdev.channels = sc->wdc_chanarray; 9072 sc->sc_wdcdev.nchannels = 1; 9073 9074 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 9075 9076 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 9077 cp = 
&sc->pciide_channels[channel]; 9078 9079 if (pciide_chansetup(sc, channel, interface) == 0) 9080 continue; 9081 9082 pciide_map_compat_intr(pa, cp, channel, interface); 9083 if (cp->hw_ok == 0) 9084 continue; 9085 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 9086 pciide_pci_intr); 9087 if (cp->hw_ok == 0) { 9088 pciide_unmap_compat_intr(pa, cp, channel, interface); 9089 continue; 9090 } 9091 9092 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 9093 } 9094 } 9095 9096 void 9097 sch_setup_channel(struct channel_softc *chp) 9098 { 9099 struct ata_drive_datas *drvp; 9100 int drive, mode; 9101 u_int32_t tim, timaddr; 9102 struct pciide_channel *cp = (struct pciide_channel *)chp; 9103 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 9104 9105 /* Setup DMA if needed */ 9106 pciide_channel_dma_setup(cp); 9107 9108 /* Per channel settings */ 9109 for (drive = 0; drive < 2; drive++) { 9110 drvp = &chp->ch_drive[drive]; 9111 9112 /* If no drive, skip */ 9113 if ((drvp->drive_flags & DRIVE) == 0) 9114 continue; 9115 9116 timaddr = (drive == 0) ? SCH_D0TIM : SCH_D1TIM; 9117 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, timaddr); 9118 tim &= ~SCH_TIM_MASK; 9119 9120 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 9121 (drvp->drive_flags & DRIVE_UDMA) != 0) { 9122 /* Setup UltraDMA mode */ 9123 drvp->drive_flags &= ~DRIVE_DMA; 9124 9125 mode = drvp->PIO_mode; 9126 tim |= (drvp->UDMA_mode << 16) | SCH_TIM_SYNCDMA; 9127 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 9128 (drvp->drive_flags & DRIVE_DMA) != 0) { 9129 /* Setup multiword DMA mode */ 9130 drvp->drive_flags &= ~DRIVE_UDMA; 9131 9132 tim &= ~SCH_TIM_SYNCDMA; 9133 9134 /* mode = min(pio, dma + 2) */ 9135 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 9136 mode = drvp->PIO_mode; 9137 else 9138 mode = drvp->DMA_mode + 2; 9139 } else { 9140 mode = drvp->PIO_mode; 9141 goto pio; 9142 } 9143 9144 pio: 9145 /* Setup PIO mode */ 9146 if (mode <= 2) { 9147 drvp->DMA_mode = 0; 9148 drvp->PIO_mode = 0; 9149 } else { 9150 drvp->PIO_mode = mode; 9151 drvp->DMA_mode = mode - 2; 9152 } 9153 tim |= (drvp->DMA_mode << 8) | (drvp->PIO_mode); 9154 pci_conf_write(sc->sc_pc, sc->sc_tag, timaddr, tim); 9155 } 9156 9157 pciide_print_modes(cp); 9158 } 9159 9160 void 9161 rdc_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 9162 { 9163 struct pciide_channel *cp; 9164 int channel; 9165 u_int32_t patr; 9166 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 9167 bus_size_t cmdsize, ctlsize; 9168 9169 printf(": DMA"); 9170 pciide_mapreg_dma(sc, pa); 9171 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 9172 if (sc->sc_dma_ok) { 9173 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 9174 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 9175 sc->sc_wdcdev.irqack = pciide_irqack; 9176 sc->sc_wdcdev.dma_init = pciide_dma_init; 9177 } 9178 sc->sc_wdcdev.PIO_cap = 4; 9179 sc->sc_wdcdev.DMA_cap = 2; 9180 sc->sc_wdcdev.UDMA_cap = 5; 9181 sc->sc_wdcdev.set_modes = rdc_setup_channel; 9182 sc->sc_wdcdev.channels = sc->wdc_chanarray; 9183 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 9184 9185 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 9186 9187 WDCDEBUG_PRINT(("rdc_chip_map: old PATR=0x%x, " 9188 "PSD1ATR=0x%x, UDCCR=0x%x, IIOCR=0x%x\n", 9189 pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR), 9190 pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR), 9191 pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR), 9192 pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR)), 9193 DEBUG_PROBE); 9194 9195 for (channel = 0; channel < 
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];

		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		patr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR);
		if ((patr & RDCIDE_PATR_EN(channel)) == 0) {
			printf("%s: %s ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			cp->hw_ok = 0;
			continue;
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			goto next;
		if (pciide_chan_candisable(cp)) {
			patr &= ~RDCIDE_PATR_EN(channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_PATR,
			    patr);
		}
		if (cp->hw_ok == 0)
			goto next;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
next:
		if (cp->hw_ok == 0)
			pciide_unmap_compat_intr(pa, cp, channel, interface);
	}

	WDCDEBUG_PRINT(("rdc_chip_map: PATR=0x%x, "
	    "PSD1ATR=0x%x, UDCCR=0x%x, IIOCR=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR)),
	    DEBUG_PROBE);
}

void
rdc_setup_channel(struct channel_softc *chp)
{
	u_int8_t drive;
	u_int32_t patr, psd1atr, udccr, iiocr;
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	patr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR);
	psd1atr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR);
	udccr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR);
	iiocr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/* clear modes */
	patr = patr & (RDCIDE_PATR_EN(0) | RDCIDE_PATR_EN(1));
	psd1atr &= ~RDCIDE_PSD1ATR_SETUP_MASK(chp->channel);
	psd1atr &= ~RDCIDE_PSD1ATR_HOLD_MASK(chp->channel);
	for (drive = 0; drive < 2; drive++) {
		udccr &= ~RDCIDE_UDCCR_EN(chp->channel, drive);
		udccr &= ~RDCIDE_UDCCR_TIM_MASK(chp->channel, drive);
		iiocr &= ~RDCIDE_IIOCR_CLK_MASK(chp->channel, drive);
	}
	/* now setup modes */
	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (drvp->drive_flags & DRIVE_ATAPI)
			patr |= RDCIDE_PATR_ATA(chp->channel, drive);
		if (drive == 0) {
			patr |= RDCIDE_PATR_SETUP(rdcide_setup[drvp->PIO_mode],
			    chp->channel);
			patr |= RDCIDE_PATR_HOLD(rdcide_hold[drvp->PIO_mode],
			    chp->channel);
		} else {
			patr |= RDCIDE_PATR_DEV1_TEN(chp->channel);
			psd1atr |= RDCIDE_PSD1ATR_SETUP(
			    rdcide_setup[drvp->PIO_mode],
			    chp->channel);
			psd1atr |= RDCIDE_PSD1ATR_HOLD(
			    rdcide_hold[drvp->PIO_mode],
			    chp->channel);
		}
		if (drvp->PIO_mode > 0) {
			patr |= RDCIDE_PATR_FTIM(chp->channel, drive);
			patr |= RDCIDE_PATR_IORDY(chp->channel, drive);
		}
		if (drvp->drive_flags & DRIVE_DMA)
			patr |= RDCIDE_PATR_DMAEN(chp->channel, drive);
		if ((drvp->drive_flags & DRIVE_UDMA) == 0)
			continue;

		if ((iiocr & RDCIDE_IIOCR_CABLE(chp->channel, drive)) == 0
		    && drvp->UDMA_mode > 2)
			drvp->UDMA_mode = 2;
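		/*
		 * Enable Ultra-DMA for this drive and program the UDMA
		 * timing value and clock selection that correspond to the
		 * (possibly cable-capped) mode.
		 */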
		udccr |= RDCIDE_UDCCR_EN(chp->channel, drive);
		udccr |= RDCIDE_UDCCR_TIM(rdcide_udmatim[drvp->UDMA_mode],
		    chp->channel, drive);
		iiocr |= RDCIDE_IIOCR_CLK(rdcide_udmaclk[drvp->UDMA_mode],
		    chp->channel, drive);
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_PATR, patr);
	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR, psd1atr);
	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR, udccr);
	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR, iiocr);
}