/*	$OpenBSD: pciide.c,v 1.358 2017/07/12 13:40:59 mikeb Exp $	*/
/*	$NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou
 *      for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10

#ifdef WDCDEBUG
#ifndef WDCDEBUG_PCIIDE_MASK
#define WDCDEBUG_PCIIDE_MASK 0x00
#endif
int wdcdebug_pciide_mask = WDCDEBUG_PCIIDE_MASK;
#define WDCDEBUG_PRINT(args, level) do {		\
	if ((wdcdebug_pciide_mask & (level)) != 0)	\
		printf args;				\
} while (0)
#else
#define WDCDEBUG_PRINT(args, level)
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/endian.h>

#include <machine/bus.h>

#include <dev/ata/atavar.h>
#include <dev/ata/satareg.h>
#include <dev/ic/wdcreg.h>
#include <dev/ic/wdcvar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_sii3112_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/pciide_acard_reg.h>
#include <dev/pci/pciide_natsemi_reg.h>
#include <dev/pci/pciide_nforce_reg.h>
#include <dev/pci/pciide_ite_reg.h>
#include <dev/pci/pciide_ixp_reg.h>
#include <dev/pci/pciide_svwsata_reg.h>
#include <dev/pci/pciide_jmicron_reg.h>
#include <dev/pci/pciide_rdc_reg.h>
#include <dev/pci/cy82c693var.h>

int pciide_skip_ata;
int pciide_skip_atapi;

/* functions for reading/writing 8-bit PCI registers */

u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t, int);
void pciide_pci_write(pci_chipset_tag_t, pcitag_t, int, u_int8_t);

u_int8_t
pciide_pci_read(pci_chipset_tag_t pc, pcitag_t pa, int reg)
{
	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

void
pciide_pci_write(pci_chipset_tag_t pc, pcitag_t pa, int reg, u_int8_t val)
{
	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}
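
/*
 * Worked example (editor's sketch, not part of the driver): PCI config
 * space is accessed 32 bits at a time, so the helpers above turn a
 * byte-wide access into a naturally aligned 32-bit access plus a shift.
 * Reading the hypothetical 8-bit register at config offset 0x41 becomes
 *
 *	pciide_pci_read(pc, tag, 0x41)
 *	    == (pci_conf_read(pc, tag, 0x40) >> 8) & 0xff
 *
 * and pciide_pci_write() performs the matching read-modify-write of the
 * same 32-bit word, clearing and refilling only byte lane 1.
 */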

void default_chip_map(struct pciide_softc *, struct pci_attach_args *);

void sata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sata_setup_channel(struct channel_softc *);

void piix_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piixsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piix_setup_channel(struct channel_softc *);
void piix3_4_setup_channel(struct channel_softc *);
void piix_timing_debug(struct pciide_softc *);

u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t);
u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas *);
u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t);

void amd756_chip_map(struct pciide_softc *, struct pci_attach_args *);
void amd756_setup_channel(struct channel_softc *);

void apollo_chip_map(struct pciide_softc *, struct pci_attach_args *);
void apollo_setup_channel(struct channel_softc *);

void cmd_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd0643_9_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd0643_9_setup_channel(struct channel_softc *);
void cmd680_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd680_setup_channel(struct channel_softc *);
void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int);
void cmd_channel_map(struct pci_attach_args *, struct pciide_softc *, int);
int cmd_pci_intr(void *);
void cmd646_9_irqack(struct channel_softc *);

void sii_fixup_cacheline(struct pciide_softc *, struct pci_attach_args *);
void sii3112_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sii3112_setup_channel(struct channel_softc *);
void sii3112_drv_probe(struct channel_softc *);
void sii3114_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sii3114_mapreg_dma(struct pciide_softc *, struct pci_attach_args *);
int sii3114_chansetup(struct pciide_softc *, int);
void sii3114_mapchan(struct pciide_channel *);
u_int8_t sii3114_dmacmd_read(struct pciide_softc *, int);
void sii3114_dmacmd_write(struct pciide_softc *, int, u_int8_t);
u_int8_t sii3114_dmactl_read(struct pciide_softc *, int);
void sii3114_dmactl_write(struct pciide_softc *, int, u_int8_t);
void sii3114_dmatbl_write(struct pciide_softc *, int, u_int32_t);

void cy693_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cy693_setup_channel(struct channel_softc *);

void sis_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sis_setup_channel(struct channel_softc *);
void sis96x_setup_channel(struct channel_softc *);
int sis_hostbr_match(struct pci_attach_args *);
int sis_south_match(struct pci_attach_args *);

void natsemi_chip_map(struct pciide_softc *, struct pci_attach_args *);
void natsemi_setup_channel(struct channel_softc *);
int natsemi_pci_intr(void *);
void natsemi_irqack(struct channel_softc *);
void ns_scx200_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ns_scx200_setup_channel(struct channel_softc *);

void acer_chip_map(struct pciide_softc *, struct pci_attach_args *);
void acer_setup_channel(struct channel_softc *);
int acer_pci_intr(void *);
int acer_dma_init(void *, int, int, void *, size_t, int);

void pdc202xx_chip_map(struct pciide_softc *, struct pci_attach_args *);
void pdc202xx_setup_channel(struct channel_softc *);
void pdc20268_setup_channel(struct channel_softc *);
int pdc202xx_pci_intr(void *);
int pdc20265_pci_intr(void *);
void pdc20262_dma_start(void *, int, int);
int pdc20262_dma_finish(void *, int, int, int);

u_int8_t pdc268_config_read(struct channel_softc *, int);

void pdcsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void pdc203xx_setup_channel(struct channel_softc *);
int pdc203xx_pci_intr(void *);
void pdc203xx_irqack(struct channel_softc *);
void pdc203xx_dma_start(void *, int, int);
int pdc203xx_dma_finish(void *, int, int, int);
int pdc205xx_pci_intr(void *);
void pdc205xx_do_reset(struct channel_softc *);
void pdc205xx_drv_probe(struct channel_softc *);

void opti_chip_map(struct pciide_softc *, struct pci_attach_args *);
void opti_setup_channel(struct channel_softc *);

void hpt_chip_map(struct pciide_softc *, struct pci_attach_args *);
void hpt_setup_channel(struct channel_softc *);
int hpt_pci_intr(void *);

void acard_chip_map(struct pciide_softc *, struct pci_attach_args *);
void acard_setup_channel(struct channel_softc *);

void serverworks_chip_map(struct pciide_softc *, struct pci_attach_args *);
void serverworks_setup_channel(struct channel_softc *);
int serverworks_pci_intr(void *);

void svwsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void svwsata_mapreg_dma(struct pciide_softc *, struct pci_attach_args *);
void svwsata_mapchan(struct pciide_channel *);
u_int8_t svwsata_dmacmd_read(struct pciide_softc *, int);
void svwsata_dmacmd_write(struct pciide_softc *, int, u_int8_t);
u_int8_t svwsata_dmactl_read(struct pciide_softc *, int);
void svwsata_dmactl_write(struct pciide_softc *, int, u_int8_t);
void svwsata_dmatbl_write(struct pciide_softc *, int, u_int32_t);
void svwsata_drv_probe(struct channel_softc *);

void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *);
void nforce_setup_channel(struct channel_softc *);
int nforce_pci_intr(void *);

void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *);

void ite_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ite_setup_channel(struct channel_softc *);

void ixp_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ixp_setup_channel(struct channel_softc *);

void jmicron_chip_map(struct pciide_softc *, struct pci_attach_args *);
void jmicron_setup_channel(struct channel_softc *);

void phison_chip_map(struct pciide_softc *, struct pci_attach_args *);
void phison_setup_channel(struct channel_softc *);

void sch_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sch_setup_channel(struct channel_softc *);

void rdc_chip_map(struct pciide_softc *, struct pci_attach_args *);
void rdc_setup_channel(struct channel_softc *);

struct pciide_product_desc {
	u_int32_t ide_product;
	u_short ide_flags;
	/* map and setup chip, probe drives */
	void (*chip_map)(struct pciide_softc *, struct pci_attach_args *);
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
#define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARs ignore upper word */

/* Default product description for devices not known to this driver */
const struct pciide_product_desc default_product_desc = {
	0,	/* Generic PCI IDE controller */
	0,
	default_chip_map
};

const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_31244, 0, artisea_chip_map },	/* Intel 31244 SATA */
	{ PCI_PRODUCT_INTEL_82092AA, 0, default_chip_map },	/* Intel 82092AA IDE */
	{ PCI_PRODUCT_INTEL_82371FB_IDE, 0, piix_chip_map },	/* Intel 82371FB IDE (PIIX) */
	{ PCI_PRODUCT_INTEL_82371FB_ISA, 0, piix_chip_map },	/* Intel 82371FB IDE (PIIX) */
	{ PCI_PRODUCT_INTEL_82372FB_IDE, 0, piix_chip_map },	/* Intel 82372FB IDE (PIIX4) */
	{ PCI_PRODUCT_INTEL_82371SB_IDE, 0, piix_chip_map },	/* Intel 82371SB IDE (PIIX3) */
	{ PCI_PRODUCT_INTEL_82371AB_IDE, 0, piix_chip_map },	/* Intel 82371AB IDE (PIIX4) */
	{ PCI_PRODUCT_INTEL_82371MX, 0, piix_chip_map },	/* Intel 82371MX IDE */
	{ PCI_PRODUCT_INTEL_82440MX_IDE, 0, piix_chip_map },	/* Intel 82440MX IDE */
	{ PCI_PRODUCT_INTEL_82451NX, 0, piix_chip_map },	/* Intel 82451NX (PIIX4) IDE */
	{ PCI_PRODUCT_INTEL_82801AA_IDE, 0, piix_chip_map },	/* Intel 82801AA IDE (ICH) */
	{ PCI_PRODUCT_INTEL_82801AB_IDE, 0, piix_chip_map },	/* Intel 82801AB IDE (ICH0) */
	{ PCI_PRODUCT_INTEL_82801BAM_IDE, 0, piix_chip_map },	/* Intel 82801BAM IDE (ICH2) */
	{ PCI_PRODUCT_INTEL_82801BA_IDE, 0, piix_chip_map },	/* Intel 82801BA IDE (ICH2) */
	{ PCI_PRODUCT_INTEL_82801CAM_IDE, 0, piix_chip_map },	/* Intel 82801CAM IDE (ICH3) */
	{ PCI_PRODUCT_INTEL_82801CA_IDE, 0, piix_chip_map },	/* Intel 82801CA IDE (ICH3) */
	{ PCI_PRODUCT_INTEL_82801DB_IDE, 0, piix_chip_map },	/* Intel 82801DB IDE (ICH4) */
	{ PCI_PRODUCT_INTEL_82801DBL_IDE, 0, piix_chip_map },	/* Intel 82801DBL IDE (ICH4-L) */
	{ PCI_PRODUCT_INTEL_82801DBM_IDE, 0, piix_chip_map },	/* Intel 82801DBM IDE (ICH4-M) */
	{ PCI_PRODUCT_INTEL_82801EB_IDE, 0, piix_chip_map },	/* Intel 82801EB/ER (ICH5/5R) IDE */
	{ PCI_PRODUCT_INTEL_82801EB_SATA, 0, piixsata_chip_map },	/* Intel 82801EB (ICH5) SATA */
	{ PCI_PRODUCT_INTEL_82801ER_SATA, 0, piixsata_chip_map },	/* Intel 82801ER (ICH5R) SATA */
	{ PCI_PRODUCT_INTEL_6300ESB_IDE, 0, piix_chip_map },	/* Intel 6300ESB IDE */
	{ PCI_PRODUCT_INTEL_6300ESB_SATA, 0, piixsata_chip_map },	/* Intel 6300ESB SATA */
	{ PCI_PRODUCT_INTEL_6300ESB_SATA2, 0, piixsata_chip_map },	/* Intel 6300ESB SATA */
	{ PCI_PRODUCT_INTEL_6321ESB_IDE, 0, piix_chip_map },	/* Intel 6321ESB IDE */
	{ PCI_PRODUCT_INTEL_82801FB_IDE, 0, piix_chip_map },	/* Intel 82801FB (ICH6) IDE */
	{ PCI_PRODUCT_INTEL_82801FBM_SATA, 0, piixsata_chip_map },	/* Intel 82801FBM (ICH6M) SATA */
	{ PCI_PRODUCT_INTEL_82801FB_SATA, 0, piixsata_chip_map },	/* Intel 82801FB (ICH6) SATA */
	{ PCI_PRODUCT_INTEL_82801FR_SATA, 0, piixsata_chip_map },	/* Intel 82801FR (ICH6R) SATA */
	{ PCI_PRODUCT_INTEL_82801GB_IDE, 0, piix_chip_map },	/* Intel 82801GB (ICH7) IDE */
	{ PCI_PRODUCT_INTEL_82801GB_SATA, 0, piixsata_chip_map },	/* Intel 82801GB (ICH7) SATA */
	{ PCI_PRODUCT_INTEL_82801GR_AHCI, 0, piixsata_chip_map },	/* Intel 82801GR (ICH7R) AHCI */
	{ PCI_PRODUCT_INTEL_82801GR_RAID, 0, piixsata_chip_map },	/* Intel 82801GR (ICH7R) RAID */
	{ PCI_PRODUCT_INTEL_82801GBM_SATA, 0, piixsata_chip_map },	/* Intel 82801GBM (ICH7M) SATA */
	{ PCI_PRODUCT_INTEL_82801GBM_AHCI, 0, piixsata_chip_map },	/* Intel 82801GBM (ICH7M) AHCI */
	{ PCI_PRODUCT_INTEL_82801GHM_RAID, 0, piixsata_chip_map },	/* Intel 82801GHM (ICH7M DH) RAID */
	{ PCI_PRODUCT_INTEL_82801H_SATA_1, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) SATA */
	{ PCI_PRODUCT_INTEL_82801H_AHCI_6P, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) AHCI */
	{ PCI_PRODUCT_INTEL_82801H_RAID, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) RAID */
	{ PCI_PRODUCT_INTEL_82801H_AHCI_4P, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) AHCI */
	{ PCI_PRODUCT_INTEL_82801H_SATA_2, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) SATA */
	{ PCI_PRODUCT_INTEL_82801HBM_SATA, 0, piixsata_chip_map },	/* Intel 82801HBM (ICH8M) SATA */
	{ PCI_PRODUCT_INTEL_82801HBM_AHCI, 0, piixsata_chip_map },	/* Intel 82801HBM (ICH8M) AHCI */
	{ PCI_PRODUCT_INTEL_82801HBM_RAID, 0, piixsata_chip_map },	/* Intel 82801HBM (ICH8M) RAID */
	{ PCI_PRODUCT_INTEL_82801HBM_IDE, 0, piix_chip_map },	/* Intel 82801HBM (ICH8M) IDE */
	{ PCI_PRODUCT_INTEL_82801I_SATA_1, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_2, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_3, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_4, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_5, 0, piixsata_chip_map },	/* Intel 82801I (ICH9M) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_6, 0, piixsata_chip_map },	/* Intel 82801I (ICH9M) SATA */
	{ PCI_PRODUCT_INTEL_82801JD_SATA_1, 0, piixsata_chip_map },	/* Intel 82801JD (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_82801JD_SATA_2, 0, piixsata_chip_map },	/* Intel 82801JD (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_82801JI_SATA_1, 0, piixsata_chip_map },	/* Intel 82801JI (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_82801JI_SATA_2, 0, piixsata_chip_map },	/* Intel 82801JI (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_6321ESB_SATA, 0, piixsata_chip_map },	/* Intel 6321ESB SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_1, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_2, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_3, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_4, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_5, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_6, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_C600_SATA, 0, piixsata_chip_map },	/* Intel C600 SATA */
	{ PCI_PRODUCT_INTEL_C610_SATA_1, 0, piixsata_chip_map },	/* Intel C610 SATA */
	{ PCI_PRODUCT_INTEL_C610_SATA_2, 0, piixsata_chip_map },	/* Intel C610 SATA */
	{ PCI_PRODUCT_INTEL_C610_SATA_3, 0, piixsata_chip_map },	/* Intel C610 SATA */
	{ PCI_PRODUCT_INTEL_6SERIES_SATA_1, 0, piixsata_chip_map },	/* Intel 6 Series SATA */
	{ PCI_PRODUCT_INTEL_6SERIES_SATA_2, 0, piixsata_chip_map },	/* Intel 6 Series SATA */
	{ PCI_PRODUCT_INTEL_6SERIES_SATA_3, 0, piixsata_chip_map },	/* Intel 6 Series SATA */
	{ PCI_PRODUCT_INTEL_6SERIES_SATA_4, 0, piixsata_chip_map },	/* Intel 6 Series SATA */
	{ PCI_PRODUCT_INTEL_7SERIES_SATA_1, 0, piixsata_chip_map },	/* Intel 7 Series SATA */
	{ PCI_PRODUCT_INTEL_7SERIES_SATA_2, 0, piixsata_chip_map },	/* Intel 7 Series SATA */
	{ PCI_PRODUCT_INTEL_7SERIES_SATA_3, 0, piixsata_chip_map },	/* Intel 7 Series SATA */
	{ PCI_PRODUCT_INTEL_7SERIES_SATA_4, 0, piixsata_chip_map },	/* Intel 7 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_SATA_1, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_SATA_2, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_SATA_3, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_SATA_4, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_LP_SATA_1, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_LP_SATA_2, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_LP_SATA_3, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_8SERIES_LP_SATA_4, 0, piixsata_chip_map },	/* Intel 8 Series SATA */
	{ PCI_PRODUCT_INTEL_9SERIES_SATA_1, 0, piixsata_chip_map },	/* Intel 9 Series SATA */
	{ PCI_PRODUCT_INTEL_9SERIES_SATA_2, 0, piixsata_chip_map },	/* Intel 9 Series SATA */
	{ PCI_PRODUCT_INTEL_ATOMC2000_SATA_1, 0, piixsata_chip_map },	/* Intel Atom C2000 SATA */
	{ PCI_PRODUCT_INTEL_ATOMC2000_SATA_2, 0, piixsata_chip_map },	/* Intel Atom C2000 SATA */
	{ PCI_PRODUCT_INTEL_ATOMC2000_SATA_3, 0, piixsata_chip_map },	/* Intel Atom C2000 SATA */
	{ PCI_PRODUCT_INTEL_ATOMC2000_SATA_4, 0, piixsata_chip_map },	/* Intel Atom C2000 SATA */
	{ PCI_PRODUCT_INTEL_BAYTRAIL_SATA_1, 0, piixsata_chip_map },	/* Intel Baytrail SATA */
	{ PCI_PRODUCT_INTEL_BAYTRAIL_SATA_2, 0, piixsata_chip_map },	/* Intel Baytrail SATA */
	{ PCI_PRODUCT_INTEL_EP80579_SATA, 0, piixsata_chip_map },	/* Intel EP80579 SATA */
	{ PCI_PRODUCT_INTEL_DH8900_SATA_1, 0, piixsata_chip_map },	/* Intel DH8900 SATA */
	{ PCI_PRODUCT_INTEL_DH8900_SATA_2, 0, piixsata_chip_map },	/* Intel DH8900 SATA */
	{ PCI_PRODUCT_INTEL_SCH_IDE, 0, sch_chip_map }	/* Intel SCH IDE */
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE, 0, amd756_chip_map },	/* AMD 756 */
	{ PCI_PRODUCT_AMD_766_IDE, 0, amd756_chip_map },	/* AMD 766 */
	{ PCI_PRODUCT_AMD_PBC768_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_8111_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_CS5536_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_HUDSON2_IDE, 0, ixp_chip_map }
};

#ifdef notyet
const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621, 0, opti_chip_map },
	{ PCI_PRODUCT_OPTI_82C568, 0, opti_chip_map },
	{ PCI_PRODUCT_OPTI_82D568, 0, opti_chip_map }
};
#endif

const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640, 0, cmd_chip_map },	/* CMD Technology PCI0640 */
	{ PCI_PRODUCT_CMDTECH_643, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0643 */
	{ PCI_PRODUCT_CMDTECH_646, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0646 */
	{ PCI_PRODUCT_CMDTECH_648, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0648 */
	{ PCI_PRODUCT_CMDTECH_649, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0649 */
	{ PCI_PRODUCT_CMDTECH_680, IDE_PCI_CLASS_OVERRIDE, cmd680_chip_map },	/* CMD Technology PCI0680 */
	{ PCI_PRODUCT_CMDTECH_3112, 0, sii3112_chip_map },	/* SiI3112 SATA */
	{ PCI_PRODUCT_CMDTECH_3512, 0, sii3112_chip_map },	/* SiI3512 SATA */
	{ PCI_PRODUCT_CMDTECH_AAR_1210SA, 0, sii3112_chip_map },	/* Adaptec AAR-1210SA */
	{ PCI_PRODUCT_CMDTECH_3114, 0, sii3114_chip_map }	/* SiI3114 SATA */
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C416, 0, apollo_chip_map },	/* VIA VT82C416 IDE */
	{ PCI_PRODUCT_VIATECH_VT82C571, 0, apollo_chip_map },	/* VIA VT82C571 IDE */
	{ PCI_PRODUCT_VIATECH_VT6410, IDE_PCI_CLASS_OVERRIDE, apollo_chip_map },	/* VIA VT6410 IDE */
	{ PCI_PRODUCT_VIATECH_VT6415, IDE_PCI_CLASS_OVERRIDE, apollo_chip_map },	/* VIA VT6415 IDE */
	{ PCI_PRODUCT_VIATECH_CX700_IDE, 0, apollo_chip_map },	/* VIA CX700 IDE */
	{ PCI_PRODUCT_VIATECH_VX700_IDE, 0, apollo_chip_map },	/* VIA VX700 IDE */
	{ PCI_PRODUCT_VIATECH_VX855_IDE, 0, apollo_chip_map },	/* VIA VX855 IDE */
	{ PCI_PRODUCT_VIATECH_VX900_IDE, 0, apollo_chip_map },	/* VIA VX900 IDE */
	{ PCI_PRODUCT_VIATECH_VT6420_SATA, 0, sata_chip_map },	/* VIA VT6420 SATA */
	{ PCI_PRODUCT_VIATECH_VT6421_SATA, 0, sata_chip_map },	/* VIA VT6421 SATA */
	{ PCI_PRODUCT_VIATECH_VT8237A_SATA, 0, sata_chip_map },	/* VIA VT8237A SATA */
	{ PCI_PRODUCT_VIATECH_VT8237A_SATA_2, 0, sata_chip_map },	/* VIA VT8237A SATA */
	{ PCI_PRODUCT_VIATECH_VT8237S_SATA, 0, sata_chip_map },	/* VIA VT8237S SATA */
	{ PCI_PRODUCT_VIATECH_VT8251_SATA, 0, sata_chip_map }	/* VIA VT8251 SATA */
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693, IDE_16BIT_IOSPACE, cy693_chip_map }	/* Contaq CY82C693 IDE */
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5513, 0, sis_chip_map },	/* SIS 5513 EIDE */
	{ PCI_PRODUCT_SIS_180, 0, sata_chip_map },	/* SIS 180 SATA */
	{ PCI_PRODUCT_SIS_181, 0, sata_chip_map },	/* SIS 181 SATA */
	{ PCI_PRODUCT_SIS_182, 0, sata_chip_map },	/* SIS 182 SATA */
	{ PCI_PRODUCT_SIS_1183, 0, sata_chip_map }	/* SIS 1183 SATA */
};

/*
 * The National/AMD CS5535 requires MSRs to set DMA/PIO modes so it
 * has been banished to the MD i386 pciide_machdep
 */
const struct pciide_product_desc pciide_natsemi_products[] = {
#ifdef __i386__
	{ PCI_PRODUCT_NS_CS5535_IDE, 0, gcsc_chip_map },	/* National/AMD CS5535 IDE */
#endif
	{ PCI_PRODUCT_NS_PC87415, 0, natsemi_chip_map },	/* National Semi PC87415 IDE */
	{ PCI_PRODUCT_NS_SCx200_IDE, 0, ns_scx200_chip_map }	/* National Semi SCx200 IDE */
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229, 0, acer_chip_map }	/* Acer Labs M5229 UDMA IDE */
};

const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT36x/37x IDE */
	{ PCI_PRODUCT_TRIONES_HPT372A, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT372A IDE */
	{ PCI_PRODUCT_TRIONES_HPT302, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT302 IDE */
	{ PCI_PRODUCT_TRIONES_HPT371, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT371 IDE */
	{ PCI_PRODUCT_TRIONES_HPT374, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map }	/* Highpoint HPT374 IDE */
};

const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_PDC20246, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20262, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20265, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20267, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20268, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20268R, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20269, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20271, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20275, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20276, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20277, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20318, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20319, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20371, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20375, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20376, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20377, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20378, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20379, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40518, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40519, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40718, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40719, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40779, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20571, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20575, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20579, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20771, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20775, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map }
};

const struct pciide_product_desc pciide_acard_products[] = {
	{ PCI_PRODUCT_ACARD_ATP850U, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP850U Ultra33 Controller */
	{ PCI_PRODUCT_ACARD_ATP860, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP860 Ultra66 Controller */
	{ PCI_PRODUCT_ACARD_ATP860A, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP860-A Ultra66 Controller */
	{ PCI_PRODUCT_ACARD_ATP865A, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP865-A Ultra133 Controller */
	{ PCI_PRODUCT_ACARD_ATP865R, IDE_PCI_CLASS_OVERRIDE, acard_chip_map }	/* Acard ATP865-R Ultra133 Controller */
};

const struct pciide_product_desc pciide_serverworks_products[] = {
	{ PCI_PRODUCT_RCC_OSB4_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB5_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB6_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB6_RAID_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_K2_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_FRODO4_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_FRODO8_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_SATA_1, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_SATA_2, 0, svwsata_chip_map }
};

const struct pciide_product_desc pciide_nvidia_products[] = {
	{ PCI_PRODUCT_NVIDIA_NFORCE_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_400_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_ATA133, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP73_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP77_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_400_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_SATA1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA_2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA_3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA_4, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA_2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA_3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA_4, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP77_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_4, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP89_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP89_SATA_2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP89_SATA_3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP89_SATA_4, 0, sata_chip_map }
};

const struct pciide_product_desc pciide_ite_products[] = {
	{ PCI_PRODUCT_ITEXPRESS_IT8211F, IDE_PCI_CLASS_OVERRIDE, ite_chip_map },
	{ PCI_PRODUCT_ITEXPRESS_IT8212F, IDE_PCI_CLASS_OVERRIDE, ite_chip_map }
};

const struct pciide_product_desc pciide_ati_products[] = {
	{ PCI_PRODUCT_ATI_SB200_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB300_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB400_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB600_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB700_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB300_SATA, 0, sii3112_chip_map },
	{ PCI_PRODUCT_ATI_SB400_SATA_1, 0, sii3112_chip_map },
	{ PCI_PRODUCT_ATI_SB400_SATA_2, 0, sii3112_chip_map }
};

const struct pciide_product_desc pciide_jmicron_products[] = {
	{ PCI_PRODUCT_JMICRON_JMB361, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB363, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB365, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB366, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB368, 0, jmicron_chip_map }
};

const struct pciide_product_desc pciide_phison_products[] = {
	{ PCI_PRODUCT_PHISON_PS5000, 0, phison_chip_map },
};

const struct pciide_product_desc pciide_rdc_products[] = {
	{ PCI_PRODUCT_RDC_R1012_IDE, 0, rdc_chip_map },
};

struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
	int ide_nproducts;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products,
	  nitems(pciide_intel_products) },
	{ PCI_VENDOR_AMD, pciide_amd_products,
	  nitems(pciide_amd_products) },
#ifdef notyet
	{ PCI_VENDOR_OPTI, pciide_opti_products,
	  nitems(pciide_opti_products) },
#endif
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products,
	  nitems(pciide_cmd_products) },
	{ PCI_VENDOR_VIATECH, pciide_via_products,
	  nitems(pciide_via_products) },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products,
	  nitems(pciide_cypress_products) },
	{ PCI_VENDOR_SIS, pciide_sis_products,
	  nitems(pciide_sis_products) },
	{ PCI_VENDOR_NS, pciide_natsemi_products,
	  nitems(pciide_natsemi_products) },
	{ PCI_VENDOR_ALI, pciide_acer_products,
	  nitems(pciide_acer_products) },
	{ PCI_VENDOR_TRIONES, pciide_triones_products,
	  nitems(pciide_triones_products) },
	{ PCI_VENDOR_ACARD, pciide_acard_products,
	  nitems(pciide_acard_products) },
	{ PCI_VENDOR_RCC, pciide_serverworks_products,
	  nitems(pciide_serverworks_products) },
	{ PCI_VENDOR_PROMISE, pciide_promise_products,
	  nitems(pciide_promise_products) },
	{ PCI_VENDOR_NVIDIA, pciide_nvidia_products,
	  nitems(pciide_nvidia_products) },
	{ PCI_VENDOR_ITEXPRESS, pciide_ite_products,
	  nitems(pciide_ite_products) },
	{ PCI_VENDOR_ATI, pciide_ati_products,
	  nitems(pciide_ati_products) },
	{ PCI_VENDOR_JMICRON, pciide_jmicron_products,
	  nitems(pciide_jmicron_products) },
	{ PCI_VENDOR_PHISON, pciide_phison_products,
	  nitems(pciide_phison_products) },
	{ PCI_VENDOR_RDC, pciide_rdc_products,
	  nitems(pciide_rdc_products) }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01

int pciide_match(struct device *, void *, void *);
void pciide_attach(struct device *, struct device *, void *);
int pciide_detach(struct device *, int);
int pciide_activate(struct device *, int);

struct cfattach pciide_pci_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach,
	pciide_detach, pciide_activate
};

struct cfattach pciide_jmb_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach,
	pciide_detach, pciide_activate
};

struct cfdriver pciide_cd = {
	NULL, "pciide", DV_DULL
};

const struct pciide_product_desc *pciide_lookup_product(u_int32_t);

const struct pciide_product_desc *
pciide_lookup_product(u_int32_t id)
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;
	int i;

	for (i = 0, vp = pciide_vendors; i < nitems(pciide_vendors); vp++, i++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if (i == nitems(pciide_vendors))
		return (NULL);

	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (i == vp->ide_nproducts)
		return (NULL);
	return (pp);
}
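
/*
 * Example of the two-level lookup above (editor's sketch): for a device
 * whose pa_id has a vendor field equal to PCI_VENDOR_INTEL, the outer
 * loop selects pciide_intel_products and the inner loop linearly scans
 * that table for a matching PCI_PRODUCT().  On a hit the descriptor
 * supplies ide_flags and the chip_map routine that pciide_attach() will
 * call; on a miss pciide_attach() falls back to default_product_desc
 * and default_chip_map.
 */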

int
pciide_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Some IDE controllers have severe bugs when used in PCI mode.
	 * We punt and attach them to the ISA bus instead.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
		return (0);

	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers.  Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE))
		return (1);

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE) {
		switch (PCI_SUBCLASS(pa->pa_class)) {
		case PCI_SUBCLASS_MASS_STORAGE_IDE:
			return (1);

		/*
		 * We only match these if we know they have
		 * a match, as we may not support native interfaces
		 * on them.
		 */
		case PCI_SUBCLASS_MASS_STORAGE_SATA:
		case PCI_SUBCLASS_MASS_STORAGE_RAID:
		case PCI_SUBCLASS_MASS_STORAGE_MISC:
			if (pp)
				return (1);
			else
				return (0);
			break;
		}
	}

	return (0);
}

void
pciide_attach(struct device *parent, struct device *self, void *aux)
{
	struct pciide_softc *sc = (struct pciide_softc *)self;
	struct pci_attach_args *pa = aux;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL)
		sc->sc_pp = &default_product_desc;
	sc->sc_rev = PCI_REVISION(pa->pa_class);

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Set up DMA defaults; these might be adjusted by chip_map. */
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;

	sc->sc_dmacmd_read = pciide_dmacmd_read;
	sc->sc_dmacmd_write = pciide_dmacmd_write;
	sc->sc_dmactl_read = pciide_dmactl_read;
	sc->sc_dmactl_write = pciide_dmactl_write;
	sc->sc_dmatbl_write = pciide_dmatbl_write;

	WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc,
	    sc->sc_tag, pa->pa_class), DEBUG_PROBE);

	if (pciide_skip_ata)
		sc->sc_wdcdev.quirks |= WDC_QUIRK_NOATA;
	if (pciide_skip_atapi)
		sc->sc_wdcdev.quirks |= WDC_QUIRK_NOATAPI;

	sc->sc_pp->chip_map(sc, pa);

	WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG)),
	    DEBUG_PROBE);
}

int
pciide_detach(struct device *self, int flags)
{
	struct pciide_softc *sc = (struct pciide_softc *)self;

	if (sc->chip_unmap == NULL)
		panic("unmap not yet implemented for this chipset");
	else
		sc->chip_unmap(sc, flags);

	return 0;
}

int
pciide_activate(struct device *self, int act)
{
	int rv = 0;
	struct pciide_softc *sc = (struct pciide_softc *)self;
	int i;

	switch (act) {
	case DVACT_SUSPEND:
		rv = config_activate_children(self, act);

		for (i = 0; i < nitems(sc->sc_save); i++)
			sc->sc_save[i] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, PCI_MAPREG_END + 0x18 + (i * 4));

		if (sc->sc_pp->chip_map == sch_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SCH_D0TIM);
			sc->sc_save2[1] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SCH_D1TIM);
		} else if (sc->sc_pp->chip_map == piixsata_chip_map) {
			sc->sc_save2[0] = pciide_pci_read(sc->sc_pc,
			    sc->sc_tag, ICH5_SATA_MAP);
			sc->sc_save2[1] = pciide_pci_read(sc->sc_pc,
			    sc->sc_tag, ICH5_SATA_PI);
			sc->sc_save2[2] = pciide_pci_read(sc->sc_pc,
			    sc->sc_tag, ICH_SATA_PCS);
		} else if (sc->sc_pp->chip_map == sii3112_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SII3112_SCS_CMD);
			sc->sc_save2[1] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, SII3112_PCI_CFGCTL);
		} else if (sc->sc_pp->chip_map == ite_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, IT_TIM(0));
		} else if (sc->sc_pp->chip_map == nforce_chip_map) {
			sc->sc_save2[0] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, NFORCE_PIODMATIM);
			sc->sc_save2[1] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, NFORCE_PIOTIM);
			sc->sc_save2[2] = pci_conf_read(sc->sc_pc,
			    sc->sc_tag, NFORCE_UDMATIM);
		}
		break;
	case DVACT_RESUME:
		for (i = 0; i < nitems(sc->sc_save); i++)
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PCI_MAPREG_END + 0x18 + (i * 4),
			    sc->sc_save[i]);

		if (sc->sc_pp->chip_map == default_chip_map ||
		    sc->sc_pp->chip_map == sata_chip_map ||
		    sc->sc_pp->chip_map == piix_chip_map ||
		    sc->sc_pp->chip_map == amd756_chip_map ||
		    sc->sc_pp->chip_map == phison_chip_map ||
		    sc->sc_pp->chip_map == rdc_chip_map ||
		    sc->sc_pp->chip_map == ixp_chip_map ||
		    sc->sc_pp->chip_map == acard_chip_map ||
		    sc->sc_pp->chip_map == apollo_chip_map ||
		    sc->sc_pp->chip_map == sis_chip_map) {
			/* nothing to restore -- uses only 0x40 - 0x56 */
		} else if (sc->sc_pp->chip_map == sch_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SCH_D0TIM, sc->sc_save2[0]);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SCH_D1TIM, sc->sc_save2[1]);
		} else if (sc->sc_pp->chip_map == piixsata_chip_map) {
			pciide_pci_write(sc->sc_pc, sc->sc_tag,
			    ICH5_SATA_MAP, sc->sc_save2[0]);
			pciide_pci_write(sc->sc_pc, sc->sc_tag,
			    ICH5_SATA_PI, sc->sc_save2[1]);
			pciide_pci_write(sc->sc_pc, sc->sc_tag,
			    ICH_SATA_PCS, sc->sc_save2[2]);
		} else if (sc->sc_pp->chip_map == sii3112_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SII3112_SCS_CMD, sc->sc_save2[0]);
			delay(50 * 1000);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    SII3112_PCI_CFGCTL, sc->sc_save2[1]);
			delay(50 * 1000);
		} else if (sc->sc_pp->chip_map == ite_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    IT_TIM(0), sc->sc_save2[0]);
		} else if (sc->sc_pp->chip_map == nforce_chip_map) {
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    NFORCE_PIODMATIM, sc->sc_save2[0]);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    NFORCE_PIOTIM, sc->sc_save2[1]);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    NFORCE_UDMATIM, sc->sc_save2[2]);
		} else {
			printf("%s: restore for unknown chip map %x\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    sc->sc_pp->ide_product);
		}

		rv = config_activate_children(self, act);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

int
pciide_mapregs_compat(struct pci_attach_args *pa, struct pciide_channel *cp,
    int compatchan, bus_size_t *cmdsizep, bus_size_t *ctlsizep)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	pcireg_t csr;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MASTER_ENABLE);

	wdc_cp->cmd_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}
	wdc_cp->cmd_iosz = *cmdsizep;
	wdc_cp->ctl_iosz = *ctlsizep;

	return (1);
}
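
/*
 * Sketch of what the compat mapping above amounts to (editor's note):
 * PCIIDE_COMPAT_CMD_BASE()/PCIIDE_COMPAT_CTL_BASE() select the fixed
 * legacy ISA ports, conventionally
 *
 *	channel 0: cmd 0x1f0-0x1f7, ctl 0x3f6
 *	channel 1: cmd 0x170-0x177, ctl 0x376
 *
 * so a compat channel is reached through pa->pa_iot at those addresses
 * rather than through a BAR, and it uses the machine-dependent compat
 * interrupt (see pciide_machdep_compat_intr_disestablish() below) rather
 * than the native-PCI interrupt.
 */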
(1); 1713 } 1714 1715 int 1716 pciide_unmapregs_compat(struct pciide_softc *sc, struct pciide_channel *cp) 1717 { 1718 struct channel_softc *wdc_cp = &cp->wdc_channel; 1719 1720 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz); 1721 bus_space_unmap(wdc_cp->ctl_iot, wdc_cp->cmd_ioh, wdc_cp->ctl_iosz); 1722 1723 if (sc->sc_pci_ih != NULL) { 1724 pciide_machdep_compat_intr_disestablish(sc->sc_pc, sc->sc_pci_ih); 1725 sc->sc_pci_ih = NULL; 1726 } 1727 1728 return (0); 1729 } 1730 1731 int 1732 pciide_mapregs_native(struct pci_attach_args *pa, struct pciide_channel *cp, 1733 bus_size_t *cmdsizep, bus_size_t *ctlsizep, int (*pci_intr)(void *)) 1734 { 1735 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1736 struct channel_softc *wdc_cp = &cp->wdc_channel; 1737 const char *intrstr; 1738 pci_intr_handle_t intrhandle; 1739 pcireg_t maptype; 1740 1741 cp->compat = 0; 1742 1743 if (sc->sc_pci_ih == NULL) { 1744 if (pci_intr_map(pa, &intrhandle) != 0) { 1745 printf("%s: couldn't map native-PCI interrupt\n", 1746 sc->sc_wdcdev.sc_dev.dv_xname); 1747 return (0); 1748 } 1749 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 1750 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 1751 intrhandle, IPL_BIO, pci_intr, sc, 1752 sc->sc_wdcdev.sc_dev.dv_xname); 1753 if (sc->sc_pci_ih != NULL) { 1754 printf("%s: using %s for native-PCI interrupt\n", 1755 sc->sc_wdcdev.sc_dev.dv_xname, 1756 intrstr ? intrstr : "unknown interrupt"); 1757 } else { 1758 printf("%s: couldn't establish native-PCI interrupt", 1759 sc->sc_wdcdev.sc_dev.dv_xname); 1760 if (intrstr != NULL) 1761 printf(" at %s", intrstr); 1762 printf("\n"); 1763 return (0); 1764 } 1765 } 1766 cp->ih = sc->sc_pci_ih; 1767 sc->sc_pc = pa->pa_pc; 1768 1769 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 1770 PCIIDE_REG_CMD_BASE(wdc_cp->channel)); 1771 WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n", 1772 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1773 (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE); 1774 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel), 1775 maptype, 0, 1776 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) { 1777 printf("%s: couldn't map %s cmd regs\n", 1778 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1779 return (0); 1780 } 1781 1782 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 1783 PCIIDE_REG_CTL_BASE(wdc_cp->channel)); 1784 WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n", 1785 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1786 (maptype == PCI_MAPREG_TYPE_IO ? "I/O": "memory")), DEBUG_PROBE); 1787 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel), 1788 maptype, 0, 1789 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) { 1790 printf("%s: couldn't map %s ctl regs\n", 1791 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1792 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 1793 return (0); 1794 } 1795 /* 1796 * In native mode, 4 bytes of I/O space are mapped for the control 1797 * register, the control register is at offset 2. Pass the generic 1798 * code a handle for only one byte at the right offset. 
	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdc_cp->ctl_ioh) != 0) {
		printf("%s: unable to subregion %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
		return (0);
	}
	wdc_cp->cmd_iosz = *cmdsizep;
	wdc_cp->ctl_iosz = *ctlsizep;

	return (1);
}

int
pciide_unmapregs_native(struct pciide_softc *sc, struct pciide_channel *cp)
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz);

	/* Unmap the whole control space, not just the sub-region */
	bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, wdc_cp->ctl_iosz);

	if (sc->sc_pci_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih);
		sc->sc_pci_ih = NULL;
	}

	return (0);
}

void
pciide_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done.  If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space", some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
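	/*
	 * Layout sketch (editor's note, per the Bus Master IDE spec cited
	 * at the top of this file): the block mapped here is 16 bytes,
	 * 8 per channel, and the IDEDMA_CMD/IDEDMA_CTL/IDEDMA_TBL offsets
	 * used by the accessors further down index into it roughly as
	 *
	 *	+0x00 command	+0x02 status	+0x04 PRD table pointer
	 *	+0x08 command	+0x0a status	+0x0c PRD table pointer
	 *
	 * for the primary and secondary channel respectively.
	 */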

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", unused (registers at unsafe address %#lx)", addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, &sc->sc_dma_iosz,
		    0) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", (unsupported maptype 0x%x)", maptype);
		break;
	}
}

void
pciide_unmapreg_dma(struct pciide_softc *sc)
{
	bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, sc->sc_dma_iosz);
}

int
pciide_intr_flag(struct pciide_channel *cp)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int chan = cp->wdc_channel.channel;

	if (cp->dma_in_progress) {
		int retry = 10;
		int status;

		/* Check the status register */
		for (retry = 10; retry > 0; retry--) {
			status = PCIIDE_DMACTL_READ(sc, chan);
			if (status & IDEDMA_CTL_INTR) {
				break;
			}
			DELAY(5);
		}

		/* Not for us. */
		if (retry == 0)
			return (0);

		return (1);
	}

	return (-1);
}

int
pciide_compat_intr(void *arg)
{
	struct pciide_channel *cp = arg;

	if (pciide_intr_flag(cp) == 0)
		return (0);

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}
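
/*
 * Note on the claiming convention used below (editor's summary):
 * pciide_intr_flag() returns 1 when the bus-master status register shows
 * an interrupt for a channel with DMA in progress, 0 when the interrupt
 * clearly isn't ours, and -1 when it cannot tell (no DMA pending).  The
 * shared handler pciide_pci_intr() folds the per-channel wdcintr()
 * results the same way: any channel claiming the interrupt makes the
 * handler return 1, otherwise the best answer seen (possibly -1, "maybe")
 * is returned.
 */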
*/ 1962 if (cp->compat) 1963 continue; 1964 1965 if (cp->hw_ok == 0) 1966 continue; 1967 1968 if (pciide_intr_flag(cp) == 0) 1969 continue; 1970 1971 crv = wdcintr(wdc_cp); 1972 if (crv == 0) 1973 ; /* leave rv alone */ 1974 else if (crv == 1) 1975 rv = 1; /* claim the intr */ 1976 else if (rv == 0) /* crv should be -1 in this case */ 1977 rv = crv; /* if we've done no better, take it */ 1978 } 1979 return (rv); 1980 } 1981 1982 u_int8_t 1983 pciide_dmacmd_read(struct pciide_softc *sc, int chan) 1984 { 1985 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1986 IDEDMA_CMD(chan))); 1987 } 1988 1989 void 1990 pciide_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 1991 { 1992 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1993 IDEDMA_CMD(chan), val); 1994 } 1995 1996 u_int8_t 1997 pciide_dmactl_read(struct pciide_softc *sc, int chan) 1998 { 1999 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2000 IDEDMA_CTL(chan))); 2001 } 2002 2003 void 2004 pciide_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 2005 { 2006 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2007 IDEDMA_CTL(chan), val); 2008 } 2009 2010 void 2011 pciide_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 2012 { 2013 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 2014 IDEDMA_TBL(chan), val); 2015 } 2016 2017 void 2018 pciide_channel_dma_setup(struct pciide_channel *cp) 2019 { 2020 int drive; 2021 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2022 struct ata_drive_datas *drvp; 2023 2024 for (drive = 0; drive < 2; drive++) { 2025 drvp = &cp->wdc_channel.ch_drive[drive]; 2026 /* If no drive, skip */ 2027 if ((drvp->drive_flags & DRIVE) == 0) 2028 continue; 2029 /* setup DMA if needed */ 2030 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2031 (drvp->drive_flags & DRIVE_UDMA) == 0) || 2032 sc->sc_dma_ok == 0) { 2033 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 2034 continue; 2035 } 2036 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive) 2037 != 0) { 2038 /* Abort DMA setup */ 2039 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 2040 continue; 2041 } 2042 } 2043 } 2044 2045 int 2046 pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive) 2047 { 2048 bus_dma_segment_t seg; 2049 int error, rseg; 2050 const bus_size_t dma_table_size = 2051 sizeof(struct idedma_table) * NIDEDMA_TABLES; 2052 struct pciide_dma_maps *dma_maps = 2053 &sc->pciide_channels[channel].dma_maps[drive]; 2054 2055 /* If table was already allocated, just return */ 2056 if (dma_maps->dma_table) 2057 return (0); 2058 2059 /* Allocate memory for the DMA tables and map it */ 2060 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size, 2061 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg, 2062 BUS_DMA_NOWAIT)) != 0) { 2063 printf("%s:%d: unable to allocate table DMA for " 2064 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2065 channel, drive, error); 2066 return (error); 2067 } 2068 2069 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 2070 dma_table_size, 2071 (caddr_t *)&dma_maps->dma_table, 2072 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 2073 printf("%s:%d: unable to map table DMA for" 2074 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2075 channel, drive, error); 2076 return (error); 2077 } 2078 2079 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, " 2080 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size, 2081 seg.ds_addr), DEBUG_PROBE); 2082 2083 /* Create and load table DMA map for this disk */ 2084 if ((error = 
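		/*
		 * The controller fetches the PRD table through a single
		 * physical pointer, so this map allows exactly one segment
		 * of dma_table_size bytes kept within an IDEDMA_TBL_ALIGN
		 * boundary.
		 */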
bus_dmamap_create(sc->sc_dmat, dma_table_size, 2085 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 2086 &dma_maps->dmamap_table)) != 0) { 2087 printf("%s:%d: unable to create table DMA map for " 2088 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2089 channel, drive, error); 2090 return (error); 2091 } 2092 if ((error = bus_dmamap_load(sc->sc_dmat, 2093 dma_maps->dmamap_table, 2094 dma_maps->dma_table, 2095 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 2096 printf("%s:%d: unable to load table DMA map for " 2097 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2098 channel, drive, error); 2099 return (error); 2100 } 2101 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 2102 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE); 2103 /* Create a xfer DMA map for this drive */ 2104 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 2105 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary, 2106 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 2107 &dma_maps->dmamap_xfer)) != 0) { 2108 printf("%s:%d: unable to create xfer DMA map for " 2109 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2110 channel, drive, error); 2111 return (error); 2112 } 2113 return (0); 2114 } 2115 2116 int 2117 pciide_dma_init(void *v, int channel, int drive, void *databuf, 2118 size_t datalen, int flags) 2119 { 2120 struct pciide_softc *sc = v; 2121 int error, seg; 2122 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2123 struct pciide_dma_maps *dma_maps = 2124 &sc->pciide_channels[channel].dma_maps[drive]; 2125 #ifndef BUS_DMA_RAW 2126 #define BUS_DMA_RAW 0 2127 #endif 2128 2129 error = bus_dmamap_load(sc->sc_dmat, 2130 dma_maps->dmamap_xfer, 2131 databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW); 2132 if (error) { 2133 printf("%s:%d: unable to load xfer DMA map for " 2134 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2135 channel, drive, error); 2136 return (error); 2137 } 2138 2139 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 2140 dma_maps->dmamap_xfer->dm_mapsize, 2141 (flags & WDC_DMA_READ) ? 2142 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2143 2144 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 2145 #ifdef DIAGNOSTIC 2146 /* A segment must not cross a 64k boundary */ 2147 { 2148 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 2149 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 2150 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 2151 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 2152 printf("pciide_dma: segment %d physical addr 0x%lx" 2153 " len 0x%lx not properly aligned\n", 2154 seg, phys, len); 2155 panic("pciide_dma: buf align"); 2156 } 2157 } 2158 #endif 2159 dma_maps->dma_table[seg].base_addr = 2160 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 2161 dma_maps->dma_table[seg].byte_count = 2162 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 2163 IDEDMA_BYTE_COUNT_MASK); 2164 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 2165 seg, letoh32(dma_maps->dma_table[seg].byte_count), 2166 letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 2167 2168 } 2169 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 2170 htole32(IDEDMA_BYTE_COUNT_EOT); 2171 2172 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 2173 dma_maps->dmamap_table->dm_mapsize, 2174 BUS_DMASYNC_PREWRITE); 2175 2176 /* Maps are ready. 
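	 * Each idedma_table entry above now holds a little-endian physical
	 * base address and byte count; the last entry carries
	 * IDEDMA_BYTE_COUNT_EOT so the controller knows where the list ends.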
Start DMA function */ 2177 #ifdef DIAGNOSTIC 2178 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 2179 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 2180 dma_maps->dmamap_table->dm_segs[0].ds_addr); 2181 panic("pciide_dma_init: table align"); 2182 } 2183 #endif 2184 2185 /* Clear status bits */ 2186 PCIIDE_DMACTL_WRITE(sc, channel, PCIIDE_DMACTL_READ(sc, channel)); 2187 /* Write table addr */ 2188 PCIIDE_DMATBL_WRITE(sc, channel, 2189 dma_maps->dmamap_table->dm_segs[0].ds_addr); 2190 /* set read/write */ 2191 PCIIDE_DMACMD_WRITE(sc, channel, 2192 ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd); 2193 /* remember flags */ 2194 dma_maps->dma_flags = flags; 2195 return (0); 2196 } 2197 2198 void 2199 pciide_dma_start(void *v, int channel, int drive) 2200 { 2201 struct pciide_softc *sc = v; 2202 2203 WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS); 2204 PCIIDE_DMACMD_WRITE(sc, channel, PCIIDE_DMACMD_READ(sc, channel) | 2205 IDEDMA_CMD_START); 2206 2207 sc->pciide_channels[channel].dma_in_progress = 1; 2208 } 2209 2210 int 2211 pciide_dma_finish(void *v, int channel, int drive, int force) 2212 { 2213 struct pciide_softc *sc = v; 2214 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2215 u_int8_t status; 2216 int error = 0; 2217 struct pciide_dma_maps *dma_maps = 2218 &sc->pciide_channels[channel].dma_maps[drive]; 2219 2220 status = PCIIDE_DMACTL_READ(sc, channel); 2221 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 2222 DEBUG_XFERS); 2223 if (status == 0xff) 2224 return (status); 2225 2226 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) { 2227 error = WDC_DMAST_NOIRQ; 2228 goto done; 2229 } 2230 2231 /* stop DMA channel */ 2232 PCIIDE_DMACMD_WRITE(sc, channel, 2233 ((dma_maps->dma_flags & WDC_DMA_READ) ? 2234 0x00 : IDEDMA_CMD_WRITE) | cp->idedma_cmd); 2235 2236 /* Unload the map of the data buffer */ 2237 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 2238 dma_maps->dmamap_xfer->dm_mapsize, 2239 (dma_maps->dma_flags & WDC_DMA_READ) ? 
2240 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 2241 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 2242 2243 /* Clear status bits */ 2244 PCIIDE_DMACTL_WRITE(sc, channel, status); 2245 2246 if ((status & IDEDMA_CTL_ERR) != 0) { 2247 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 2248 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 2249 error |= WDC_DMAST_ERR; 2250 } 2251 2252 if ((status & IDEDMA_CTL_INTR) == 0) { 2253 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 2254 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 2255 drive, status); 2256 error |= WDC_DMAST_NOIRQ; 2257 } 2258 2259 if ((status & IDEDMA_CTL_ACT) != 0) { 2260 /* data underrun, may be a valid condition for ATAPI */ 2261 error |= WDC_DMAST_UNDER; 2262 } 2263 2264 done: 2265 sc->pciide_channels[channel].dma_in_progress = 0; 2266 return (error); 2267 } 2268 2269 void 2270 pciide_irqack(struct channel_softc *chp) 2271 { 2272 struct pciide_channel *cp = (struct pciide_channel *)chp; 2273 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2274 int chan = chp->channel; 2275 2276 /* clear status bits in IDE DMA registers */ 2277 PCIIDE_DMACTL_WRITE(sc, chan, PCIIDE_DMACTL_READ(sc, chan)); 2278 } 2279 2280 /* some common code used by several chip_map */ 2281 int 2282 pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface) 2283 { 2284 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2285 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2286 cp->name = PCIIDE_CHANNEL_NAME(channel); 2287 cp->wdc_channel.channel = channel; 2288 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2289 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 2290 if (cp->wdc_channel.ch_queue == NULL) { 2291 printf("%s: %s " 2292 "cannot allocate channel queue", 2293 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2294 return (0); 2295 } 2296 cp->hw_ok = 1; 2297 2298 return (1); 2299 } 2300 2301 void 2302 pciide_chanfree(struct pciide_softc *sc, int channel) 2303 { 2304 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2305 if (cp->wdc_channel.ch_queue) 2306 wdc_free_queue(cp->wdc_channel.ch_queue); 2307 } 2308 2309 /* some common code used by several chip channel_map */ 2310 void 2311 pciide_mapchan(struct pci_attach_args *pa, struct pciide_channel *cp, 2312 pcireg_t interface, bus_size_t *cmdsizep, bus_size_t *ctlsizep, 2313 int (*pci_intr)(void *)) 2314 { 2315 struct channel_softc *wdc_cp = &cp->wdc_channel; 2316 2317 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 2318 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 2319 pci_intr); 2320 else 2321 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2322 wdc_cp->channel, cmdsizep, ctlsizep); 2323 if (cp->hw_ok == 0) 2324 return; 2325 wdc_cp->data32iot = wdc_cp->cmd_iot; 2326 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 2327 wdcattach(wdc_cp); 2328 } 2329 2330 void 2331 pciide_unmap_chan(struct pciide_softc *sc, struct pciide_channel *cp, int flags) 2332 { 2333 struct channel_softc *wdc_cp = &cp->wdc_channel; 2334 2335 wdcdetach(wdc_cp, flags); 2336 2337 if (cp->compat != 0) 2338 pciide_unmapregs_compat(sc, cp); 2339 else 2340 pciide_unmapregs_native(sc, cp); 2341 } 2342 2343 /* 2344 * Generic code to call to know if a channel can be disabled. 
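 * Chip-specific maps use this after attaching a channel to decide whether
 * the channel's register decode (e.g. PIIX_IDETIM_IDE or AMD756_CHAN_EN)
 * can be turned off.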
Return 1 2345 * if channel can be disabled, 0 if not 2346 */ 2347 int 2348 pciide_chan_candisable(struct pciide_channel *cp) 2349 { 2350 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2351 struct channel_softc *wdc_cp = &cp->wdc_channel; 2352 2353 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 2354 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 2355 printf("%s: %s disabled (no drives)\n", 2356 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2357 cp->hw_ok = 0; 2358 return (1); 2359 } 2360 return (0); 2361 } 2362 2363 /* 2364 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 2365 * Set hw_ok=0 on failure 2366 */ 2367 void 2368 pciide_map_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2369 int compatchan, int interface) 2370 { 2371 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2372 struct channel_softc *wdc_cp = &cp->wdc_channel; 2373 2374 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2375 return; 2376 2377 cp->compat = 1; 2378 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 2379 pa, compatchan, pciide_compat_intr, cp); 2380 if (cp->ih == NULL) { 2381 printf("%s: no compatibility interrupt for use by %s\n", 2382 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2383 cp->hw_ok = 0; 2384 } 2385 } 2386 2387 /* 2388 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel. 2389 * Set hw_ok=0 on failure 2390 */ 2391 void 2392 pciide_unmap_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2393 int compatchan, int interface) 2394 { 2395 struct channel_softc *wdc_cp = &cp->wdc_channel; 2396 2397 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2398 return; 2399 2400 pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih); 2401 } 2402 2403 void 2404 pciide_print_channels(int nchannels, pcireg_t interface) 2405 { 2406 int i; 2407 2408 for (i = 0; i < nchannels; i++) { 2409 printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i), 2410 (interface & PCIIDE_INTERFACE_SETTABLE(i)) ? 2411 "configured" : "wired", 2412 (interface & PCIIDE_INTERFACE_PCI(i)) ? 
"native-PCI" : 2413 "compatibility"); 2414 } 2415 2416 printf("\n"); 2417 } 2418 2419 void 2420 pciide_print_modes(struct pciide_channel *cp) 2421 { 2422 wdc_print_current_modes(&cp->wdc_channel); 2423 } 2424 2425 void 2426 default_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2427 { 2428 struct pciide_channel *cp; 2429 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2430 pcireg_t csr; 2431 int channel, drive; 2432 struct ata_drive_datas *drvp; 2433 u_int8_t idedma_ctl; 2434 bus_size_t cmdsize, ctlsize; 2435 char *failreason; 2436 2437 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2438 printf(": DMA"); 2439 if (sc->sc_pp == &default_product_desc && 2440 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 2441 PCIIDE_OPTIONS_DMA) == 0) { 2442 printf(" (unsupported)"); 2443 sc->sc_dma_ok = 0; 2444 } else { 2445 pciide_mapreg_dma(sc, pa); 2446 if (sc->sc_dma_ok != 0) 2447 printf(", (partial support)"); 2448 } 2449 } else { 2450 printf(": no DMA"); 2451 sc->sc_dma_ok = 0; 2452 } 2453 if (sc->sc_dma_ok) { 2454 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2455 sc->sc_wdcdev.irqack = pciide_irqack; 2456 } 2457 sc->sc_wdcdev.PIO_cap = 0; 2458 sc->sc_wdcdev.DMA_cap = 0; 2459 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2460 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2461 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 2462 2463 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2464 2465 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2466 cp = &sc->pciide_channels[channel]; 2467 if (pciide_chansetup(sc, channel, interface) == 0) 2468 continue; 2469 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 2470 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2471 &ctlsize, pciide_pci_intr); 2472 } else { 2473 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2474 channel, &cmdsize, &ctlsize); 2475 } 2476 if (cp->hw_ok == 0) 2477 continue; 2478 /* 2479 * Check to see if something appears to be there. 2480 */ 2481 failreason = NULL; 2482 pciide_map_compat_intr(pa, cp, channel, interface); 2483 if (cp->hw_ok == 0) 2484 continue; 2485 if (!wdcprobe(&cp->wdc_channel)) { 2486 failreason = "not responding; disabled or no drives?"; 2487 goto next; 2488 } 2489 /* 2490 * Now, make sure it's actually attributable to this PCI IDE 2491 * channel by trying to access the channel again while the 2492 * PCI IDE controller's I/O space is disabled. (If the 2493 * channel no longer appears to be there, it belongs to 2494 * this controller.) YUCK! 
2495 */ 2496 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 2497 PCI_COMMAND_STATUS_REG); 2498 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 2499 csr & ~PCI_COMMAND_IO_ENABLE); 2500 if (wdcprobe(&cp->wdc_channel)) 2501 failreason = "other hardware responding at addresses"; 2502 pci_conf_write(sc->sc_pc, sc->sc_tag, 2503 PCI_COMMAND_STATUS_REG, csr); 2504 next: 2505 if (failreason) { 2506 printf("%s: %s ignored (%s)\n", 2507 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2508 failreason); 2509 cp->hw_ok = 0; 2510 pciide_unmap_compat_intr(pa, cp, channel, interface); 2511 bus_space_unmap(cp->wdc_channel.cmd_iot, 2512 cp->wdc_channel.cmd_ioh, cmdsize); 2513 if (interface & PCIIDE_INTERFACE_PCI(channel)) 2514 bus_space_unmap(cp->wdc_channel.ctl_iot, 2515 cp->ctl_baseioh, ctlsize); 2516 else 2517 bus_space_unmap(cp->wdc_channel.ctl_iot, 2518 cp->wdc_channel.ctl_ioh, ctlsize); 2519 } 2520 if (cp->hw_ok) { 2521 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2522 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2523 wdcattach(&cp->wdc_channel); 2524 } 2525 } 2526 2527 if (sc->sc_dma_ok == 0) 2528 return; 2529 2530 /* Allocate DMA maps */ 2531 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2532 idedma_ctl = 0; 2533 cp = &sc->pciide_channels[channel]; 2534 for (drive = 0; drive < 2; drive++) { 2535 drvp = &cp->wdc_channel.ch_drive[drive]; 2536 /* If no drive, skip */ 2537 if ((drvp->drive_flags & DRIVE) == 0) 2538 continue; 2539 if ((drvp->drive_flags & DRIVE_DMA) == 0) 2540 continue; 2541 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 2542 /* Abort DMA setup */ 2543 printf("%s:%d:%d: cannot allocate DMA maps, " 2544 "using PIO transfers\n", 2545 sc->sc_wdcdev.sc_dev.dv_xname, 2546 channel, drive); 2547 drvp->drive_flags &= ~DRIVE_DMA; 2548 } 2549 printf("%s:%d:%d: using DMA data transfers\n", 2550 sc->sc_wdcdev.sc_dev.dv_xname, 2551 channel, drive); 2552 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2553 } 2554 if (idedma_ctl != 0) { 2555 /* Add software bits in status register */ 2556 PCIIDE_DMACTL_WRITE(sc, channel, idedma_ctl); 2557 } 2558 } 2559 } 2560 2561 void 2562 default_chip_unmap(struct pciide_softc *sc, int flags) 2563 { 2564 struct pciide_channel *cp; 2565 int channel; 2566 2567 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2568 cp = &sc->pciide_channels[channel]; 2569 pciide_unmap_chan(sc, cp, flags); 2570 pciide_chanfree(sc, channel); 2571 } 2572 2573 pciide_unmapreg_dma(sc); 2574 2575 if (sc->sc_cookie) 2576 free(sc->sc_cookie, M_DEVBUF, sc->sc_cookielen); 2577 } 2578 2579 void 2580 sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2581 { 2582 struct pciide_channel *cp; 2583 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2584 int channel; 2585 bus_size_t cmdsize, ctlsize; 2586 2587 if (interface == 0) { 2588 WDCDEBUG_PRINT(("sata_chip_map interface == 0\n"), 2589 DEBUG_PROBE); 2590 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2591 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 2592 } 2593 2594 printf(": DMA"); 2595 pciide_mapreg_dma(sc, pa); 2596 printf("\n"); 2597 2598 if (sc->sc_dma_ok) { 2599 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2600 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2601 sc->sc_wdcdev.irqack = pciide_irqack; 2602 } 2603 sc->sc_wdcdev.PIO_cap = 4; 2604 sc->sc_wdcdev.DMA_cap = 2; 2605 sc->sc_wdcdev.UDMA_cap = 6; 2606 2607 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2608 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2609 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | 
WDC_CAPABILITY_DATA32 | 2610 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2611 sc->sc_wdcdev.set_modes = sata_setup_channel; 2612 sc->chip_unmap = default_chip_unmap; 2613 2614 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2615 cp = &sc->pciide_channels[channel]; 2616 if (pciide_chansetup(sc, channel, interface) == 0) 2617 continue; 2618 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2619 pciide_pci_intr); 2620 sata_setup_channel(&cp->wdc_channel); 2621 } 2622 } 2623 2624 void 2625 sata_setup_channel(struct channel_softc *chp) 2626 { 2627 struct ata_drive_datas *drvp; 2628 int drive; 2629 u_int32_t idedma_ctl; 2630 struct pciide_channel *cp = (struct pciide_channel *)chp; 2631 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2632 2633 /* setup DMA if needed */ 2634 pciide_channel_dma_setup(cp); 2635 2636 idedma_ctl = 0; 2637 2638 for (drive = 0; drive < 2; drive++) { 2639 drvp = &chp->ch_drive[drive]; 2640 /* If no drive, skip */ 2641 if ((drvp->drive_flags & DRIVE) == 0) 2642 continue; 2643 if (drvp->drive_flags & DRIVE_UDMA) { 2644 /* use Ultra/DMA */ 2645 drvp->drive_flags &= ~DRIVE_DMA; 2646 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2647 } else if (drvp->drive_flags & DRIVE_DMA) { 2648 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2649 } 2650 } 2651 2652 /* 2653 * Nothing to do to setup modes; it is meaningless in S-ATA 2654 * (but many S-ATA drives still want to get the SET_FEATURE 2655 * command). 2656 */ 2657 if (idedma_ctl != 0) { 2658 /* Add software bits in status register */ 2659 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 2660 } 2661 pciide_print_modes(cp); 2662 } 2663 2664 void 2665 piix_timing_debug(struct pciide_softc *sc) 2666 { 2667 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 2668 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 2669 DEBUG_PROBE); 2670 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE && 2671 sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_ISA) { 2672 WDCDEBUG_PRINT((", sidetim=0x%x", 2673 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 2674 DEBUG_PROBE); 2675 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2676 WDCDEBUG_PRINT((", udmareg 0x%x", 2677 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 2678 DEBUG_PROBE); 2679 } 2680 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2681 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2682 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2683 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2684 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2685 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2686 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2687 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2688 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2689 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2690 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2691 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2692 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2693 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2694 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2695 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2696 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 2697 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 2698 DEBUG_PROBE); 2699 } 2700 } 2701 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 2702 } 2703 2704 void 2705 piix_chip_map(struct pciide_softc *sc, struct 
pci_attach_args *pa) 2706 { 2707 struct pciide_channel *cp; 2708 int channel; 2709 u_int32_t idetim; 2710 bus_size_t cmdsize, ctlsize; 2711 2712 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2713 2714 printf(": DMA"); 2715 pciide_mapreg_dma(sc, pa); 2716 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2717 WDC_CAPABILITY_MODE; 2718 if (sc->sc_dma_ok) { 2719 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2720 sc->sc_wdcdev.irqack = pciide_irqack; 2721 switch (sc->sc_pp->ide_product) { 2722 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2723 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2724 case PCI_PRODUCT_INTEL_82371AB_IDE: 2725 case PCI_PRODUCT_INTEL_82372FB_IDE: 2726 case PCI_PRODUCT_INTEL_82440MX_IDE: 2727 case PCI_PRODUCT_INTEL_82451NX: 2728 case PCI_PRODUCT_INTEL_82801AA_IDE: 2729 case PCI_PRODUCT_INTEL_82801AB_IDE: 2730 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2731 case PCI_PRODUCT_INTEL_82801BA_IDE: 2732 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2733 case PCI_PRODUCT_INTEL_82801CA_IDE: 2734 case PCI_PRODUCT_INTEL_82801DB_IDE: 2735 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2736 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2737 case PCI_PRODUCT_INTEL_82801EB_IDE: 2738 case PCI_PRODUCT_INTEL_82801FB_IDE: 2739 case PCI_PRODUCT_INTEL_82801GB_IDE: 2740 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2741 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2742 break; 2743 } 2744 } 2745 sc->sc_wdcdev.PIO_cap = 4; 2746 sc->sc_wdcdev.DMA_cap = 2; 2747 switch (sc->sc_pp->ide_product) { 2748 case PCI_PRODUCT_INTEL_82801AA_IDE: 2749 case PCI_PRODUCT_INTEL_82372FB_IDE: 2750 sc->sc_wdcdev.UDMA_cap = 4; 2751 break; 2752 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2753 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2754 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2755 case PCI_PRODUCT_INTEL_82801BA_IDE: 2756 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2757 case PCI_PRODUCT_INTEL_82801CA_IDE: 2758 case PCI_PRODUCT_INTEL_82801DB_IDE: 2759 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2760 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2761 case PCI_PRODUCT_INTEL_82801EB_IDE: 2762 case PCI_PRODUCT_INTEL_82801FB_IDE: 2763 case PCI_PRODUCT_INTEL_82801GB_IDE: 2764 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2765 sc->sc_wdcdev.UDMA_cap = 5; 2766 break; 2767 default: 2768 sc->sc_wdcdev.UDMA_cap = 2; 2769 break; 2770 } 2771 2772 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE || 2773 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_ISA) { 2774 sc->sc_wdcdev.set_modes = piix_setup_channel; 2775 } else { 2776 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 2777 } 2778 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2779 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2780 2781 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2782 2783 piix_timing_debug(sc); 2784 2785 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2786 cp = &sc->pciide_channels[channel]; 2787 2788 if (pciide_chansetup(sc, channel, interface) == 0) 2789 continue; 2790 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2791 if ((PIIX_IDETIM_READ(idetim, channel) & 2792 PIIX_IDETIM_IDE) == 0) { 2793 printf("%s: %s ignored (disabled)\n", 2794 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2795 cp->hw_ok = 0; 2796 continue; 2797 } 2798 pciide_map_compat_intr(pa, cp, channel, interface); 2799 if (cp->hw_ok == 0) 2800 continue; 2801 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2802 pciide_pci_intr); 2803 if (cp->hw_ok == 0) 2804 goto next; 2805 if (pciide_chan_candisable(cp)) { 2806 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 2807 channel); 2808 
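			/*
			 * Write IDETIM back with the decode enable bit for
			 * this drive-less channel cleared.
			 */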
pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 2809 idetim); 2810 } 2811 if (cp->hw_ok == 0) 2812 goto next; 2813 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2814 next: 2815 if (cp->hw_ok == 0) 2816 pciide_unmap_compat_intr(pa, cp, channel, interface); 2817 } 2818 2819 piix_timing_debug(sc); 2820 } 2821 2822 void 2823 piixsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2824 { 2825 struct pciide_channel *cp; 2826 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2827 int channel; 2828 bus_size_t cmdsize, ctlsize; 2829 u_int8_t reg, ich = 0; 2830 2831 printf(": DMA"); 2832 pciide_mapreg_dma(sc, pa); 2833 2834 if (sc->sc_dma_ok) { 2835 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2836 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2837 sc->sc_wdcdev.irqack = pciide_irqack; 2838 sc->sc_wdcdev.DMA_cap = 2; 2839 sc->sc_wdcdev.UDMA_cap = 6; 2840 } 2841 sc->sc_wdcdev.PIO_cap = 4; 2842 2843 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2844 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2845 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2846 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2847 sc->sc_wdcdev.set_modes = sata_setup_channel; 2848 2849 switch(sc->sc_pp->ide_product) { 2850 case PCI_PRODUCT_INTEL_6300ESB_SATA: 2851 case PCI_PRODUCT_INTEL_6300ESB_SATA2: 2852 case PCI_PRODUCT_INTEL_82801EB_SATA: 2853 case PCI_PRODUCT_INTEL_82801ER_SATA: 2854 ich = 5; 2855 break; 2856 case PCI_PRODUCT_INTEL_82801FB_SATA: 2857 case PCI_PRODUCT_INTEL_82801FR_SATA: 2858 case PCI_PRODUCT_INTEL_82801FBM_SATA: 2859 ich = 6; 2860 break; 2861 default: 2862 ich = 7; 2863 break; 2864 } 2865 2866 /* 2867 * Put the SATA portion of controllers that don't operate in combined 2868 * mode into native PCI modes so the maximum number of devices can be 2869 * used. 
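	 * (In combined mode the SATA ports share the legacy primary/secondary
	 * channels with PATA and so must stay in compatibility mode.)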
Intel calls this "enhanced mode" 2870 */ 2871 if (ich == 5) { 2872 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP); 2873 if ((reg & ICH5_SATA_MAP_COMBINED) == 0) { 2874 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2875 ICH5_SATA_PI); 2876 reg |= ICH5_SATA_PI_PRI_NATIVE | 2877 ICH5_SATA_PI_SEC_NATIVE; 2878 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2879 ICH5_SATA_PI, reg); 2880 interface |= PCIIDE_INTERFACE_PCI(0) | 2881 PCIIDE_INTERFACE_PCI(1); 2882 } 2883 } else { 2884 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP) & 2885 ICH6_SATA_MAP_CMB_MASK; 2886 if (reg != ICH6_SATA_MAP_CMB_PRI && 2887 reg != ICH6_SATA_MAP_CMB_SEC) { 2888 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2889 ICH5_SATA_PI); 2890 reg |= ICH5_SATA_PI_PRI_NATIVE | 2891 ICH5_SATA_PI_SEC_NATIVE; 2892 2893 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2894 ICH5_SATA_PI, reg); 2895 interface |= PCIIDE_INTERFACE_PCI(0) | 2896 PCIIDE_INTERFACE_PCI(1); 2897 2898 /* 2899 * Ask for SATA IDE Mode, we don't need to do this 2900 * for the combined mode case as combined mode is 2901 * only allowed in IDE Mode 2902 */ 2903 if (ich >= 7) { 2904 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 2905 ICH5_SATA_MAP) & ~ICH7_SATA_MAP_SMS_MASK; 2906 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2907 ICH5_SATA_MAP, reg); 2908 } 2909 } 2910 } 2911 2912 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2913 2914 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2915 cp = &sc->pciide_channels[channel]; 2916 if (pciide_chansetup(sc, channel, interface) == 0) 2917 continue; 2918 2919 pciide_map_compat_intr(pa, cp, channel, interface); 2920 if (cp->hw_ok == 0) 2921 continue; 2922 2923 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2924 pciide_pci_intr); 2925 if (cp->hw_ok != 0) 2926 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2927 2928 if (cp->hw_ok == 0) 2929 pciide_unmap_compat_intr(pa, cp, channel, interface); 2930 } 2931 } 2932 2933 void 2934 piix_setup_channel(struct channel_softc *chp) 2935 { 2936 u_int8_t mode[2], drive; 2937 u_int32_t oidetim, idetim, idedma_ctl; 2938 struct pciide_channel *cp = (struct pciide_channel *)chp; 2939 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2940 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 2941 2942 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2943 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 2944 idedma_ctl = 0; 2945 2946 /* set up new idetim: Enable IDE registers decode */ 2947 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 2948 chp->channel); 2949 2950 /* setup DMA */ 2951 pciide_channel_dma_setup(cp); 2952 2953 /* 2954 * Here we have to mess up with drives mode: PIIX can't have 2955 * different timings for master and slave drives. 2956 * We need to find the best combination. 
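	 * (The original PIIX has a single ISP/RTC timing field per channel
	 * in IDETIM and no SIDETIM register, so one timing must fit both
	 * drives.)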
2957 */ 2958 2959 /* If both drives supports DMA, take the lower mode */ 2960 if ((drvp[0].drive_flags & DRIVE_DMA) && 2961 (drvp[1].drive_flags & DRIVE_DMA)) { 2962 mode[0] = mode[1] = 2963 min(drvp[0].DMA_mode, drvp[1].DMA_mode); 2964 drvp[0].DMA_mode = mode[0]; 2965 drvp[1].DMA_mode = mode[1]; 2966 goto ok; 2967 } 2968 /* 2969 * If only one drive supports DMA, use its mode, and 2970 * put the other one in PIO mode 0 if mode not compatible 2971 */ 2972 if (drvp[0].drive_flags & DRIVE_DMA) { 2973 mode[0] = drvp[0].DMA_mode; 2974 mode[1] = drvp[1].PIO_mode; 2975 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] || 2976 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]]) 2977 mode[1] = drvp[1].PIO_mode = 0; 2978 goto ok; 2979 } 2980 if (drvp[1].drive_flags & DRIVE_DMA) { 2981 mode[1] = drvp[1].DMA_mode; 2982 mode[0] = drvp[0].PIO_mode; 2983 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] || 2984 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]]) 2985 mode[0] = drvp[0].PIO_mode = 0; 2986 goto ok; 2987 } 2988 /* 2989 * If both drives are not DMA, takes the lower mode, unless 2990 * one of them is PIO mode < 2 2991 */ 2992 if (drvp[0].PIO_mode < 2) { 2993 mode[0] = drvp[0].PIO_mode = 0; 2994 mode[1] = drvp[1].PIO_mode; 2995 } else if (drvp[1].PIO_mode < 2) { 2996 mode[1] = drvp[1].PIO_mode = 0; 2997 mode[0] = drvp[0].PIO_mode; 2998 } else { 2999 mode[0] = mode[1] = 3000 min(drvp[1].PIO_mode, drvp[0].PIO_mode); 3001 drvp[0].PIO_mode = mode[0]; 3002 drvp[1].PIO_mode = mode[1]; 3003 } 3004 ok: /* The modes are setup */ 3005 for (drive = 0; drive < 2; drive++) { 3006 if (drvp[drive].drive_flags & DRIVE_DMA) { 3007 idetim |= piix_setup_idetim_timings( 3008 mode[drive], 1, chp->channel); 3009 goto end; 3010 } 3011 } 3012 /* If we are there, none of the drives are DMA */ 3013 if (mode[0] >= 2) 3014 idetim |= piix_setup_idetim_timings( 3015 mode[0], 0, chp->channel); 3016 else 3017 idetim |= piix_setup_idetim_timings( 3018 mode[1], 0, chp->channel); 3019 end: /* 3020 * timing mode is now set up in the controller. 
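	 * (piix_setup_idetim_drvs() below adds the per-drive DTE/PPE/IE/TIME
	 * enable bits.)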
Enable 3021 * it per-drive 3022 */ 3023 for (drive = 0; drive < 2; drive++) { 3024 /* If no drive, skip */ 3025 if ((drvp[drive].drive_flags & DRIVE) == 0) 3026 continue; 3027 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 3028 if (drvp[drive].drive_flags & DRIVE_DMA) 3029 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3030 } 3031 if (idedma_ctl != 0) { 3032 /* Add software bits in status register */ 3033 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3034 IDEDMA_CTL(chp->channel), 3035 idedma_ctl); 3036 } 3037 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 3038 pciide_print_modes(cp); 3039 } 3040 3041 void 3042 piix3_4_setup_channel(struct channel_softc *chp) 3043 { 3044 struct ata_drive_datas *drvp; 3045 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 3046 struct pciide_channel *cp = (struct pciide_channel *)chp; 3047 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3048 int drive; 3049 int channel = chp->channel; 3050 3051 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 3052 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 3053 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 3054 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 3055 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 3056 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 3057 PIIX_SIDETIM_RTC_MASK(channel)); 3058 3059 idedma_ctl = 0; 3060 /* If channel disabled, no need to go further */ 3061 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 3062 return; 3063 /* set up new idetim: Enable IDE registers decode */ 3064 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 3065 3066 /* setup DMA if needed */ 3067 pciide_channel_dma_setup(cp); 3068 3069 for (drive = 0; drive < 2; drive++) { 3070 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 3071 PIIX_UDMATIM_SET(0x3, channel, drive)); 3072 drvp = &chp->ch_drive[drive]; 3073 /* If no drive, skip */ 3074 if ((drvp->drive_flags & DRIVE) == 0) 3075 continue; 3076 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3077 (drvp->drive_flags & DRIVE_UDMA) == 0)) 3078 goto pio; 3079 3080 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 3081 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 3082 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 3083 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 3084 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 3085 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 3086 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 3087 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 3088 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 3089 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 3090 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 3091 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 3092 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 3093 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 3094 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 3095 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 3096 ideconf |= PIIX_CONFIG_PINGPONG; 3097 } 3098 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 3099 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 3100 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 3101 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 3102 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 3103 
sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 3104 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 3105 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 3106 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 3107 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 3108 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 3109 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 3110 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) { 3111 /* setup Ultra/100 */ 3112 if (drvp->UDMA_mode > 2 && 3113 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 3114 drvp->UDMA_mode = 2; 3115 if (drvp->UDMA_mode > 4) { 3116 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 3117 } else { 3118 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 3119 if (drvp->UDMA_mode > 2) { 3120 ideconf |= PIIX_CONFIG_UDMA66(channel, 3121 drive); 3122 } else { 3123 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 3124 drive); 3125 } 3126 } 3127 } 3128 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 3129 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 3130 /* setup Ultra/66 */ 3131 if (drvp->UDMA_mode > 2 && 3132 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 3133 drvp->UDMA_mode = 2; 3134 if (drvp->UDMA_mode > 2) 3135 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 3136 else 3137 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 3138 } 3139 3140 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3141 (drvp->drive_flags & DRIVE_UDMA)) { 3142 /* use Ultra/DMA */ 3143 drvp->drive_flags &= ~DRIVE_DMA; 3144 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 3145 udmareg |= PIIX_UDMATIM_SET( 3146 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 3147 } else { 3148 /* use Multiword DMA */ 3149 drvp->drive_flags &= ~DRIVE_UDMA; 3150 if (drive == 0) { 3151 idetim |= piix_setup_idetim_timings( 3152 drvp->DMA_mode, 1, channel); 3153 } else { 3154 sidetim |= piix_setup_sidetim_timings( 3155 drvp->DMA_mode, 1, channel); 3156 idetim = PIIX_IDETIM_SET(idetim, 3157 PIIX_IDETIM_SITRE, channel); 3158 } 3159 } 3160 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3161 3162 pio: /* use PIO mode */ 3163 idetim |= piix_setup_idetim_drvs(drvp); 3164 if (drive == 0) { 3165 idetim |= piix_setup_idetim_timings( 3166 drvp->PIO_mode, 0, channel); 3167 } else { 3168 sidetim |= piix_setup_sidetim_timings( 3169 drvp->PIO_mode, 0, channel); 3170 idetim = PIIX_IDETIM_SET(idetim, 3171 PIIX_IDETIM_SITRE, channel); 3172 } 3173 } 3174 if (idedma_ctl != 0) { 3175 /* Add software bits in status register */ 3176 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3177 IDEDMA_CTL(channel), 3178 idedma_ctl); 3179 } 3180 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 3181 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 3182 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 3183 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 3184 pciide_print_modes(cp); 3185 } 3186 3187 3188 /* setup ISP and RTC fields, based on mode */ 3189 u_int32_t 3190 piix_setup_idetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 3191 { 3192 3193 if (dma) 3194 return (PIIX_IDETIM_SET(0, 3195 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 3196 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 3197 channel)); 3198 else 3199 return (PIIX_IDETIM_SET(0, 3200 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 3201 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 3202 channel)); 3203 } 3204 3205 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 3206 u_int32_t 3207 piix_setup_idetim_drvs(struct 
ata_drive_datas *drvp) 3208 { 3209 u_int32_t ret = 0; 3210 struct channel_softc *chp = drvp->chnl_softc; 3211 u_int8_t channel = chp->channel; 3212 u_int8_t drive = drvp->drive; 3213 3214 /* 3215 * If drive is using UDMA, timings setups are independant 3216 * So just check DMA and PIO here. 3217 */ 3218 if (drvp->drive_flags & DRIVE_DMA) { 3219 /* if mode = DMA mode 0, use compatible timings */ 3220 if ((drvp->drive_flags & DRIVE_DMA) && 3221 drvp->DMA_mode == 0) { 3222 drvp->PIO_mode = 0; 3223 return (ret); 3224 } 3225 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 3226 /* 3227 * PIO and DMA timings are the same, use fast timings for PIO 3228 * too, else use compat timings. 3229 */ 3230 if ((piix_isp_pio[drvp->PIO_mode] != 3231 piix_isp_dma[drvp->DMA_mode]) || 3232 (piix_rtc_pio[drvp->PIO_mode] != 3233 piix_rtc_dma[drvp->DMA_mode])) 3234 drvp->PIO_mode = 0; 3235 /* if PIO mode <= 2, use compat timings for PIO */ 3236 if (drvp->PIO_mode <= 2) { 3237 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 3238 channel); 3239 return (ret); 3240 } 3241 } 3242 3243 /* 3244 * Now setup PIO modes. If mode < 2, use compat timings. 3245 * Else enable fast timings. Enable IORDY and prefetch/post 3246 * if PIO mode >= 3. 3247 */ 3248 3249 if (drvp->PIO_mode < 2) 3250 return (ret); 3251 3252 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 3253 if (drvp->PIO_mode >= 3) { 3254 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 3255 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 3256 } 3257 return (ret); 3258 } 3259 3260 /* setup values in SIDETIM registers, based on mode */ 3261 u_int32_t 3262 piix_setup_sidetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 3263 { 3264 if (dma) 3265 return (PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 3266 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel)); 3267 else 3268 return (PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 3269 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel)); 3270 } 3271 3272 void 3273 amd756_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3274 { 3275 struct pciide_channel *cp; 3276 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3277 int channel; 3278 pcireg_t chanenable; 3279 bus_size_t cmdsize, ctlsize; 3280 3281 printf(": DMA"); 3282 pciide_mapreg_dma(sc, pa); 3283 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3284 WDC_CAPABILITY_MODE; 3285 if (sc->sc_dma_ok) { 3286 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3287 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3288 sc->sc_wdcdev.irqack = pciide_irqack; 3289 } 3290 sc->sc_wdcdev.PIO_cap = 4; 3291 sc->sc_wdcdev.DMA_cap = 2; 3292 switch (sc->sc_pp->ide_product) { 3293 case PCI_PRODUCT_AMD_8111_IDE: 3294 sc->sc_wdcdev.UDMA_cap = 6; 3295 break; 3296 case PCI_PRODUCT_AMD_766_IDE: 3297 case PCI_PRODUCT_AMD_PBC768_IDE: 3298 sc->sc_wdcdev.UDMA_cap = 5; 3299 break; 3300 default: 3301 sc->sc_wdcdev.UDMA_cap = 4; 3302 break; 3303 } 3304 sc->sc_wdcdev.set_modes = amd756_setup_channel; 3305 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3306 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3307 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 3308 3309 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3310 3311 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3312 cp = &sc->pciide_channels[channel]; 3313 if (pciide_chansetup(sc, channel, interface) == 0) 3314 continue; 3315 3316 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 3317 
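			/*
			 * Channel disabled in AMD756_CHANSTATUS_EN; report
			 * it and skip the attach.
			 */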
printf("%s: %s ignored (disabled)\n", 3318 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3319 cp->hw_ok = 0; 3320 continue; 3321 } 3322 pciide_map_compat_intr(pa, cp, channel, interface); 3323 if (cp->hw_ok == 0) 3324 continue; 3325 3326 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3327 pciide_pci_intr); 3328 3329 if (pciide_chan_candisable(cp)) { 3330 chanenable &= ~AMD756_CHAN_EN(channel); 3331 } 3332 if (cp->hw_ok == 0) { 3333 pciide_unmap_compat_intr(pa, cp, channel, interface); 3334 continue; 3335 } 3336 3337 amd756_setup_channel(&cp->wdc_channel); 3338 } 3339 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 3340 chanenable); 3341 return; 3342 } 3343 3344 void 3345 amd756_setup_channel(struct channel_softc *chp) 3346 { 3347 u_int32_t udmatim_reg, datatim_reg; 3348 u_int8_t idedma_ctl; 3349 int mode, drive; 3350 struct ata_drive_datas *drvp; 3351 struct pciide_channel *cp = (struct pciide_channel *)chp; 3352 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3353 pcireg_t chanenable; 3354 #ifndef PCIIDE_AMD756_ENABLEDMA 3355 int product = sc->sc_pp->ide_product; 3356 int rev = sc->sc_rev; 3357 #endif 3358 3359 idedma_ctl = 0; 3360 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 3361 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 3362 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 3363 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 3364 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 3365 AMD756_CHANSTATUS_EN); 3366 3367 /* setup DMA if needed */ 3368 pciide_channel_dma_setup(cp); 3369 3370 for (drive = 0; drive < 2; drive++) { 3371 drvp = &chp->ch_drive[drive]; 3372 /* If no drive, skip */ 3373 if ((drvp->drive_flags & DRIVE) == 0) 3374 continue; 3375 /* add timing values, setup DMA if needed */ 3376 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3377 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3378 mode = drvp->PIO_mode; 3379 goto pio; 3380 } 3381 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3382 (drvp->drive_flags & DRIVE_UDMA)) { 3383 /* use Ultra/DMA */ 3384 drvp->drive_flags &= ~DRIVE_DMA; 3385 3386 /* Check cable */ 3387 if ((chanenable & AMD756_CABLE(chp->channel, 3388 drive)) == 0 && drvp->UDMA_mode > 2) { 3389 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 3390 "cable not detected\n", drvp->drive_name, 3391 sc->sc_wdcdev.sc_dev.dv_xname, 3392 chp->channel, drive), DEBUG_PROBE); 3393 drvp->UDMA_mode = 2; 3394 } 3395 3396 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 3397 AMD756_UDMA_EN_MTH(chp->channel, drive) | 3398 AMD756_UDMA_TIME(chp->channel, drive, 3399 amd756_udma_tim[drvp->UDMA_mode]); 3400 /* can use PIO timings, MW DMA unused */ 3401 mode = drvp->PIO_mode; 3402 } else { 3403 /* use Multiword DMA, but only if revision is OK */ 3404 drvp->drive_flags &= ~DRIVE_UDMA; 3405 #ifndef PCIIDE_AMD756_ENABLEDMA 3406 /* 3407 * The workaround doesn't seem to be necessary 3408 * with all drives, so it can be disabled by 3409 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 3410 * triggered. 
3411 */ 3412 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 3413 printf("%s:%d:%d: multi-word DMA disabled due " 3414 "to chip revision\n", 3415 sc->sc_wdcdev.sc_dev.dv_xname, 3416 chp->channel, drive); 3417 mode = drvp->PIO_mode; 3418 drvp->drive_flags &= ~DRIVE_DMA; 3419 goto pio; 3420 } 3421 #endif 3422 /* mode = min(pio, dma+2) */ 3423 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3424 mode = drvp->PIO_mode; 3425 else 3426 mode = drvp->DMA_mode + 2; 3427 } 3428 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3429 3430 pio: /* setup PIO mode */ 3431 if (mode <= 2) { 3432 drvp->DMA_mode = 0; 3433 drvp->PIO_mode = 0; 3434 mode = 0; 3435 } else { 3436 drvp->PIO_mode = mode; 3437 drvp->DMA_mode = mode - 2; 3438 } 3439 datatim_reg |= 3440 AMD756_DATATIM_PULSE(chp->channel, drive, 3441 amd756_pio_set[mode]) | 3442 AMD756_DATATIM_RECOV(chp->channel, drive, 3443 amd756_pio_rec[mode]); 3444 } 3445 if (idedma_ctl != 0) { 3446 /* Add software bits in status register */ 3447 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3448 IDEDMA_CTL(chp->channel), 3449 idedma_ctl); 3450 } 3451 pciide_print_modes(cp); 3452 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 3453 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 3454 } 3455 3456 void 3457 apollo_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3458 { 3459 struct pciide_channel *cp; 3460 pcireg_t interface; 3461 int no_ideconf = 0, channel; 3462 u_int32_t ideconf; 3463 bus_size_t cmdsize, ctlsize; 3464 pcitag_t tag; 3465 pcireg_t id, class; 3466 3467 /* 3468 * Fake interface since VT6410 is claimed to be a ``RAID'' device. 3469 */ 3470 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3471 interface = PCI_INTERFACE(pa->pa_class); 3472 } else { 3473 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3474 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3475 } 3476 3477 switch (PCI_PRODUCT(pa->pa_id)) { 3478 case PCI_PRODUCT_VIATECH_VT6410: 3479 case PCI_PRODUCT_VIATECH_VT6415: 3480 no_ideconf = 1; 3481 /* FALLTHROUGH */ 3482 case PCI_PRODUCT_VIATECH_CX700_IDE: 3483 case PCI_PRODUCT_VIATECH_VX700_IDE: 3484 case PCI_PRODUCT_VIATECH_VX855_IDE: 3485 case PCI_PRODUCT_VIATECH_VX900_IDE: 3486 printf(": ATA133"); 3487 sc->sc_wdcdev.UDMA_cap = 6; 3488 break; 3489 default: 3490 /* 3491 * Determine the DMA capabilities by looking at the 3492 * ISA bridge. 3493 */ 3494 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 3495 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3496 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3497 3498 /* 3499 * XXX On the VT8237, the ISA bridge is on a different 3500 * device. 
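		 * XXX If what we find at function 0 isn't a bridge (and we
		 * XXX are at device 15), retry the lookup at device 17 below.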
3501 */ 3502 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE && 3503 pa->pa_device == 15) { 3504 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 17, 0); 3505 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3506 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3507 } 3508 3509 switch (PCI_PRODUCT(id)) { 3510 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 3511 if (PCI_REVISION(class) >= 0x02) { 3512 printf(": ATA33"); 3513 sc->sc_wdcdev.UDMA_cap = 2; 3514 } else { 3515 printf(": DMA"); 3516 sc->sc_wdcdev.UDMA_cap = 0; 3517 } 3518 break; 3519 case PCI_PRODUCT_VIATECH_VT82C596A: 3520 if (PCI_REVISION(class) >= 0x12) { 3521 printf(": ATA66"); 3522 sc->sc_wdcdev.UDMA_cap = 4; 3523 } else { 3524 printf(": ATA33"); 3525 sc->sc_wdcdev.UDMA_cap = 2; 3526 } 3527 break; 3528 3529 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 3530 if (PCI_REVISION(class) >= 0x40) { 3531 printf(": ATA100"); 3532 sc->sc_wdcdev.UDMA_cap = 5; 3533 } else { 3534 printf(": ATA66"); 3535 sc->sc_wdcdev.UDMA_cap = 4; 3536 } 3537 break; 3538 case PCI_PRODUCT_VIATECH_VT8231_ISA: 3539 case PCI_PRODUCT_VIATECH_VT8233_ISA: 3540 printf(": ATA100"); 3541 sc->sc_wdcdev.UDMA_cap = 5; 3542 break; 3543 case PCI_PRODUCT_VIATECH_VT8233A_ISA: 3544 case PCI_PRODUCT_VIATECH_VT8235_ISA: 3545 case PCI_PRODUCT_VIATECH_VT8237_ISA: 3546 printf(": ATA133"); 3547 sc->sc_wdcdev.UDMA_cap = 6; 3548 break; 3549 default: 3550 printf(": DMA"); 3551 sc->sc_wdcdev.UDMA_cap = 0; 3552 break; 3553 } 3554 break; 3555 } 3556 3557 pciide_mapreg_dma(sc, pa); 3558 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3559 WDC_CAPABILITY_MODE; 3560 if (sc->sc_dma_ok) { 3561 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3562 sc->sc_wdcdev.irqack = pciide_irqack; 3563 if (sc->sc_wdcdev.UDMA_cap > 0) 3564 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3565 } 3566 sc->sc_wdcdev.PIO_cap = 4; 3567 sc->sc_wdcdev.DMA_cap = 2; 3568 sc->sc_wdcdev.set_modes = apollo_setup_channel; 3569 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3570 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3571 3572 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3573 3574 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 3575 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3576 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 3577 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 3578 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3579 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 3580 DEBUG_PROBE); 3581 3582 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3583 cp = &sc->pciide_channels[channel]; 3584 if (pciide_chansetup(sc, channel, interface) == 0) 3585 continue; 3586 3587 if (no_ideconf == 0) { 3588 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, 3589 APO_IDECONF); 3590 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 3591 printf("%s: %s ignored (disabled)\n", 3592 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3593 cp->hw_ok = 0; 3594 continue; 3595 } 3596 } 3597 pciide_map_compat_intr(pa, cp, channel, interface); 3598 if (cp->hw_ok == 0) 3599 continue; 3600 3601 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3602 pciide_pci_intr); 3603 if (cp->hw_ok == 0) { 3604 goto next; 3605 } 3606 if (pciide_chan_candisable(cp)) { 3607 if (no_ideconf == 0) { 3608 ideconf &= ~APO_IDECONF_EN(channel); 3609 pci_conf_write(sc->sc_pc, sc->sc_tag, 3610 APO_IDECONF, ideconf); 3611 } 3612 } 3613 3614 if (cp->hw_ok == 0) 3615 goto next; 3616 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 3617 next: 3618 if (cp->hw_ok == 0) 3619 
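			/* attach failed; undo pciide_map_compat_intr() */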
pciide_unmap_compat_intr(pa, cp, channel, interface); 3620 } 3621 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3622 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3623 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 3624 } 3625 3626 void 3627 apollo_setup_channel(struct channel_softc *chp) 3628 { 3629 u_int32_t udmatim_reg, datatim_reg; 3630 u_int8_t idedma_ctl; 3631 int mode, drive; 3632 struct ata_drive_datas *drvp; 3633 struct pciide_channel *cp = (struct pciide_channel *)chp; 3634 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3635 3636 idedma_ctl = 0; 3637 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 3638 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 3639 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 3640 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 3641 3642 /* setup DMA if needed */ 3643 pciide_channel_dma_setup(cp); 3644 3645 /* 3646 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 3647 * downgrade to Ultra/33 if needed 3648 */ 3649 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3650 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 3651 /* both drives UDMA */ 3652 if (chp->ch_drive[0].UDMA_mode > 2 && 3653 chp->ch_drive[1].UDMA_mode <= 2) { 3654 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 3655 chp->ch_drive[0].UDMA_mode = 2; 3656 } else if (chp->ch_drive[1].UDMA_mode > 2 && 3657 chp->ch_drive[0].UDMA_mode <= 2) { 3658 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 3659 chp->ch_drive[1].UDMA_mode = 2; 3660 } 3661 } 3662 3663 for (drive = 0; drive < 2; drive++) { 3664 drvp = &chp->ch_drive[drive]; 3665 /* If no drive, skip */ 3666 if ((drvp->drive_flags & DRIVE) == 0) 3667 continue; 3668 /* add timing values, setup DMA if needed */ 3669 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3670 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3671 mode = drvp->PIO_mode; 3672 goto pio; 3673 } 3674 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3675 (drvp->drive_flags & DRIVE_UDMA)) { 3676 /* use Ultra/DMA */ 3677 drvp->drive_flags &= ~DRIVE_DMA; 3678 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 3679 APO_UDMA_EN_MTH(chp->channel, drive); 3680 if (sc->sc_wdcdev.UDMA_cap == 6) { 3681 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3682 drive, apollo_udma133_tim[drvp->UDMA_mode]); 3683 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 3684 /* 686b */ 3685 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3686 drive, apollo_udma100_tim[drvp->UDMA_mode]); 3687 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 3688 /* 596b or 686a */ 3689 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 3690 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3691 drive, apollo_udma66_tim[drvp->UDMA_mode]); 3692 } else { 3693 /* 596a or 586b */ 3694 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3695 drive, apollo_udma33_tim[drvp->UDMA_mode]); 3696 } 3697 /* can use PIO timings, MW DMA unused */ 3698 mode = drvp->PIO_mode; 3699 } else { 3700 /* use Multiword DMA */ 3701 drvp->drive_flags &= ~DRIVE_UDMA; 3702 /* mode = min(pio, dma+2) */ 3703 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3704 mode = drvp->PIO_mode; 3705 else 3706 mode = drvp->DMA_mode + 2; 3707 } 3708 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3709 3710 pio: /* setup PIO mode */ 3711 if (mode <= 2) { 3712 drvp->DMA_mode = 0; 3713 drvp->PIO_mode = 0; 3714 mode = 0; 3715 } else { 3716 drvp->PIO_mode = mode; 3717 drvp->DMA_mode = mode - 2; 3718 } 3719 datatim_reg |= 3720 APO_DATATIM_PULSE(chp->channel, drive, 3721 apollo_pio_set[mode]) | 3722 APO_DATATIM_RECOV(chp->channel, drive, 3723 
apollo_pio_rec[mode]); 3724 } 3725 if (idedma_ctl != 0) { 3726 /* Add software bits in status register */ 3727 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3728 IDEDMA_CTL(chp->channel), 3729 idedma_ctl); 3730 } 3731 pciide_print_modes(cp); 3732 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 3733 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 3734 } 3735 3736 void 3737 cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3738 int channel) 3739 { 3740 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3741 bus_size_t cmdsize, ctlsize; 3742 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 3743 pcireg_t interface; 3744 int one_channel; 3745 3746 /* 3747 * The 0648/0649 can be told to identify as a RAID controller. 3748 * In this case, we have to fake the interface 3749 */ 3750 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3751 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3752 PCIIDE_INTERFACE_SETTABLE(1); 3753 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3754 CMD_CONF_DSA1) 3755 interface |= PCIIDE_INTERFACE_PCI(0) | 3756 PCIIDE_INTERFACE_PCI(1); 3757 } else { 3758 interface = PCI_INTERFACE(pa->pa_class); 3759 } 3760 3761 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3762 cp->name = PCIIDE_CHANNEL_NAME(channel); 3763 cp->wdc_channel.channel = channel; 3764 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3765 3766 /* 3767 * Older CMD64X doesn't have independent channels 3768 */ 3769 switch (sc->sc_pp->ide_product) { 3770 case PCI_PRODUCT_CMDTECH_649: 3771 one_channel = 0; 3772 break; 3773 default: 3774 one_channel = 1; 3775 break; 3776 } 3777 3778 if (channel > 0 && one_channel) { 3779 cp->wdc_channel.ch_queue = 3780 sc->pciide_channels[0].wdc_channel.ch_queue; 3781 } else { 3782 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 3783 } 3784 if (cp->wdc_channel.ch_queue == NULL) { 3785 printf( 3786 "%s: %s cannot allocate channel queue", 3787 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3788 return; 3789 } 3790 3791 /* 3792 * With a CMD PCI64x, if we get here, the first channel is enabled: 3793 * there's no way to disable the first channel without disabling 3794 * the whole device 3795 */ 3796 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 3797 printf("%s: %s ignored (disabled)\n", 3798 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3799 cp->hw_ok = 0; 3800 return; 3801 } 3802 cp->hw_ok = 1; 3803 pciide_map_compat_intr(pa, cp, channel, interface); 3804 if (cp->hw_ok == 0) 3805 return; 3806 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 3807 if (cp->hw_ok == 0) { 3808 pciide_unmap_compat_intr(pa, cp, channel, interface); 3809 return; 3810 } 3811 if (pciide_chan_candisable(cp)) { 3812 if (channel == 1) { 3813 ctrl &= ~CMD_CTRL_2PORT; 3814 pciide_pci_write(pa->pa_pc, pa->pa_tag, 3815 CMD_CTRL, ctrl); 3816 pciide_unmap_compat_intr(pa, cp, channel, interface); 3817 } 3818 } 3819 } 3820 3821 int 3822 cmd_pci_intr(void *arg) 3823 { 3824 struct pciide_softc *sc = arg; 3825 struct pciide_channel *cp; 3826 struct channel_softc *wdc_cp; 3827 int i, rv, crv; 3828 u_int32_t priirq, secirq; 3829 3830 rv = 0; 3831 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3832 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3833 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3834 cp = &sc->pciide_channels[i]; 3835 wdc_cp = &cp->wdc_channel; 3836 /* If a compat channel, skip.
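Its interrupts arrive through the compat (ISA) interrupt handler established by pciide_map_compat_intr(), not through this PCI handler.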
*/ 3837 if (cp->compat) 3838 continue; 3839 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 3840 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 3841 crv = wdcintr(wdc_cp); 3842 if (crv == 0) { 3843 #if 0 3844 printf("%s:%d: bogus intr\n", 3845 sc->sc_wdcdev.sc_dev.dv_xname, i); 3846 #endif 3847 } else 3848 rv = 1; 3849 } 3850 } 3851 return (rv); 3852 } 3853 3854 void 3855 cmd_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3856 { 3857 int channel; 3858 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3859 3860 printf(": no DMA"); 3861 sc->sc_dma_ok = 0; 3862 3863 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3864 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3865 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3866 3867 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3868 3869 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3870 cmd_channel_map(pa, sc, channel); 3871 } 3872 } 3873 3874 void 3875 cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3876 { 3877 struct pciide_channel *cp; 3878 int channel; 3879 int rev = sc->sc_rev; 3880 pcireg_t interface; 3881 3882 /* 3883 * The 0648/0649 can be told to identify as a RAID controller. 3884 * In this case, we have to fake the interface 3885 */ 3886 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3887 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3888 PCIIDE_INTERFACE_SETTABLE(1); 3889 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3890 CMD_CONF_DSA1) 3891 interface |= PCIIDE_INTERFACE_PCI(0) | 3892 PCIIDE_INTERFACE_PCI(1); 3893 } else { 3894 interface = PCI_INTERFACE(pa->pa_class); 3895 } 3896 3897 printf(": DMA"); 3898 pciide_mapreg_dma(sc, pa); 3899 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3900 WDC_CAPABILITY_MODE; 3901 if (sc->sc_dma_ok) { 3902 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3903 switch (sc->sc_pp->ide_product) { 3904 case PCI_PRODUCT_CMDTECH_649: 3905 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3906 sc->sc_wdcdev.UDMA_cap = 5; 3907 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3908 break; 3909 case PCI_PRODUCT_CMDTECH_648: 3910 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3911 sc->sc_wdcdev.UDMA_cap = 4; 3912 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3913 break; 3914 case PCI_PRODUCT_CMDTECH_646: 3915 if (rev >= CMD0646U2_REV) { 3916 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3917 sc->sc_wdcdev.UDMA_cap = 2; 3918 } else if (rev >= CMD0646U_REV) { 3919 /* 3920 * Linux's driver claims that the 646U is broken 3921 * with UDMA.
Only enable it if we know what we're 3922 * doing 3923 */ 3924 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 3925 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3926 sc->sc_wdcdev.UDMA_cap = 2; 3927 #endif 3928 /* explicitly disable UDMA */ 3929 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3930 CMD_UDMATIM(0), 0); 3931 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3932 CMD_UDMATIM(1), 0); 3933 } 3934 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3935 break; 3936 default: 3937 sc->sc_wdcdev.irqack = pciide_irqack; 3938 } 3939 } 3940 3941 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3942 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3943 sc->sc_wdcdev.PIO_cap = 4; 3944 sc->sc_wdcdev.DMA_cap = 2; 3945 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 3946 3947 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3948 3949 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 3950 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3951 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3952 DEBUG_PROBE); 3953 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3954 cp = &sc->pciide_channels[channel]; 3955 cmd_channel_map(pa, sc, channel); 3956 if (cp->hw_ok == 0) 3957 continue; 3958 cmd0643_9_setup_channel(&cp->wdc_channel); 3959 } 3960 /* 3961 * note - this also makes sure we clear the irq disable and reset 3962 * bits 3963 */ 3964 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 3965 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 3966 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3967 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3968 DEBUG_PROBE); 3969 } 3970 3971 void 3972 cmd0643_9_setup_channel(struct channel_softc *chp) 3973 { 3974 struct ata_drive_datas *drvp; 3975 u_int8_t tim; 3976 u_int32_t idedma_ctl, udma_reg; 3977 int drive; 3978 struct pciide_channel *cp = (struct pciide_channel *)chp; 3979 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3980 3981 idedma_ctl = 0; 3982 /* setup DMA if needed */ 3983 pciide_channel_dma_setup(cp); 3984 3985 for (drive = 0; drive < 2; drive++) { 3986 drvp = &chp->ch_drive[drive]; 3987 /* If no drive, skip */ 3988 if ((drvp->drive_flags & DRIVE) == 0) 3989 continue; 3990 /* add timing values, setup DMA if needed */ 3991 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 3992 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 3993 if (drvp->drive_flags & DRIVE_UDMA) { 3994 /* UltraDMA on a 646U2, 0648 or 0649 */ 3995 drvp->drive_flags &= ~DRIVE_DMA; 3996 udma_reg = pciide_pci_read(sc->sc_pc, 3997 sc->sc_tag, CMD_UDMATIM(chp->channel)); 3998 if (drvp->UDMA_mode > 2 && 3999 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 4000 CMD_BICSR) & 4001 CMD_BICSR_80(chp->channel)) == 0) { 4002 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 4003 "80-wire cable not detected\n", 4004 drvp->drive_name, 4005 sc->sc_wdcdev.sc_dev.dv_xname, 4006 chp->channel, drive), DEBUG_PROBE); 4007 drvp->UDMA_mode = 2; 4008 } 4009 if (drvp->UDMA_mode > 2) 4010 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 4011 else if (sc->sc_wdcdev.UDMA_cap > 2) 4012 udma_reg |= CMD_UDMATIM_UDMA33(drive); 4013 udma_reg |= CMD_UDMATIM_UDMA(drive); 4014 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 4015 CMD_UDMATIM_TIM_OFF(drive)); 4016 udma_reg |= 4017 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 4018 CMD_UDMATIM_TIM_OFF(drive)); 4019 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4020 CMD_UDMATIM(chp->channel), udma_reg); 4021 } else { 4022 /* 4023 * use Multiword DMA. 
4024 * Timings will be used for both PIO and DMA, 4025 * so adjust DMA mode if needed 4026 * if we have a 0646U2/8/9, turn off UDMA 4027 */ 4028 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 4029 udma_reg = pciide_pci_read(sc->sc_pc, 4030 sc->sc_tag, 4031 CMD_UDMATIM(chp->channel)); 4032 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 4033 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4034 CMD_UDMATIM(chp->channel), 4035 udma_reg); 4036 } 4037 if (drvp->PIO_mode >= 3 && 4038 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 4039 drvp->DMA_mode = drvp->PIO_mode - 2; 4040 } 4041 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 4042 } 4043 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4044 } 4045 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4046 CMD_DATA_TIM(chp->channel, drive), tim); 4047 } 4048 if (idedma_ctl != 0) { 4049 /* Add software bits in status register */ 4050 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4051 IDEDMA_CTL(chp->channel), 4052 idedma_ctl); 4053 } 4054 pciide_print_modes(cp); 4055 #ifdef __sparc64__ 4056 /* 4057 * The Ultra 5 has a tendency to hang during reboot. This is due 4058 * to the PCI0646U asserting a PCI interrupt line when the chip 4059 * registers claim that it is not. Performing a reset at this 4060 * point appears to eliminate the symptoms. It is likely the 4061 * real cause is still lurking somewhere in the code. 4062 */ 4063 wdcreset(chp, SILENT); 4064 #endif /* __sparc64__ */ 4065 } 4066 4067 void 4068 cmd646_9_irqack(struct channel_softc *chp) 4069 { 4070 u_int32_t priirq, secirq; 4071 struct pciide_channel *cp = (struct pciide_channel *)chp; 4072 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4073 4074 if (chp->channel == 0) { 4075 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 4076 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 4077 } else { 4078 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 4079 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 4080 } 4081 pciide_irqack(chp); 4082 } 4083 4084 void 4085 cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4086 { 4087 struct pciide_channel *cp; 4088 int channel; 4089 4090 printf("\n%s: bus-master DMA support present", 4091 sc->sc_wdcdev.sc_dev.dv_xname); 4092 pciide_mapreg_dma(sc, pa); 4093 printf("\n"); 4094 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4095 WDC_CAPABILITY_MODE; 4096 if (sc->sc_dma_ok) { 4097 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4098 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 4099 sc->sc_wdcdev.UDMA_cap = 6; 4100 sc->sc_wdcdev.irqack = pciide_irqack; 4101 } 4102 4103 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4104 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4105 sc->sc_wdcdev.PIO_cap = 4; 4106 sc->sc_wdcdev.DMA_cap = 2; 4107 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 4108 4109 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 4110 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 4111 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 4112 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 4113 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4114 cp = &sc->pciide_channels[channel]; 4115 cmd680_channel_map(pa, sc, channel); 4116 if (cp->hw_ok == 0) 4117 continue; 4118 cmd680_setup_channel(&cp->wdc_channel); 4119 } 4120 } 4121 4122 void 4123 cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 4124 int channel) 4125 { 4126 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4127 bus_size_t cmdsize, ctlsize; 4128 int 
interface, i, reg; 4129 static const u_int8_t init_val[] = 4130 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 4131 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 4132 4133 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 4134 interface = PCIIDE_INTERFACE_SETTABLE(0) | 4135 PCIIDE_INTERFACE_SETTABLE(1); 4136 interface |= PCIIDE_INTERFACE_PCI(0) | 4137 PCIIDE_INTERFACE_PCI(1); 4138 } else { 4139 interface = PCI_INTERFACE(pa->pa_class); 4140 } 4141 4142 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4143 cp->name = PCIIDE_CHANNEL_NAME(channel); 4144 cp->wdc_channel.channel = channel; 4145 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4146 4147 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4148 if (cp->wdc_channel.ch_queue == NULL) { 4149 printf("%s %s: " 4150 "cannot allocate channel queue", 4151 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4152 return; 4153 } 4154 4155 /* XXX */ 4156 reg = 0xa2 + channel * 16; 4157 for (i = 0; i < sizeof(init_val); i++) 4158 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 4159 4160 printf("%s: %s %s to %s mode\n", 4161 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 4162 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 4163 "configured" : "wired", 4164 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 4165 "native-PCI" : "compatibility"); 4166 4167 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 4168 if (cp->hw_ok == 0) 4169 return; 4170 pciide_map_compat_intr(pa, cp, channel, interface); 4171 } 4172 4173 void 4174 cmd680_setup_channel(struct channel_softc *chp) 4175 { 4176 struct ata_drive_datas *drvp; 4177 u_int8_t mode, off, scsc; 4178 u_int16_t val; 4179 u_int32_t idedma_ctl; 4180 int drive; 4181 struct pciide_channel *cp = (struct pciide_channel *)chp; 4182 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4183 pci_chipset_tag_t pc = sc->sc_pc; 4184 pcitag_t pa = sc->sc_tag; 4185 static const u_int8_t udma2_tbl[] = 4186 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 4187 static const u_int8_t udma_tbl[] = 4188 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 4189 static const u_int16_t dma_tbl[] = 4190 { 0x2208, 0x10c2, 0x10c1 }; 4191 static const u_int16_t pio_tbl[] = 4192 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 4193 4194 idedma_ctl = 0; 4195 pciide_channel_dma_setup(cp); 4196 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 4197 4198 for (drive = 0; drive < 2; drive++) { 4199 drvp = &chp->ch_drive[drive]; 4200 /* If no drive, skip */ 4201 if ((drvp->drive_flags & DRIVE) == 0) 4202 continue; 4203 mode &= ~(0x03 << (drive * 4)); 4204 if (drvp->drive_flags & DRIVE_UDMA) { 4205 drvp->drive_flags &= ~DRIVE_DMA; 4206 off = 0xa0 + chp->channel * 16; 4207 if (drvp->UDMA_mode > 2 && 4208 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 4209 drvp->UDMA_mode = 2; 4210 scsc = pciide_pci_read(pc, pa, 0x8a); 4211 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 4212 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 4213 scsc = pciide_pci_read(pc, pa, 0x8a); 4214 if ((scsc & 0x30) == 0) 4215 drvp->UDMA_mode = 5; 4216 } 4217 mode |= 0x03 << (drive * 4); 4218 off = 0xac + chp->channel * 16 + drive * 2; 4219 val = pciide_pci_read(pc, pa, off) & ~0x3f; 4220 if (scsc & 0x30) 4221 val |= udma2_tbl[drvp->UDMA_mode]; 4222 else 4223 val |= udma_tbl[drvp->UDMA_mode]; 4224 pciide_pci_write(pc, pa, off, val); 4225 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4226 } else if (drvp->drive_flags & DRIVE_DMA) { 4227 mode |= 0x02 << (drive * 4); 4228 off = 0xa8 + chp->channel * 16 + drive * 2; 4229 val = dma_tbl[drvp->DMA_mode]; 4230 
pciide_pci_write(pc, pa, off, val & 0xff); 4231 pciide_pci_write(pc, pa, off, val >> 8); 4232 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4233 } else { 4234 mode |= 0x01 << (drive * 4); 4235 off = 0xa4 + chp->channel * 16 + drive * 2; 4236 val = pio_tbl[drvp->PIO_mode]; 4237 pciide_pci_write(pc, pa, off, val & 0xff); 4238 pciide_pci_write(pc, pa, off, val >> 8); 4239 } 4240 } 4241 4242 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode); 4243 if (idedma_ctl != 0) { 4244 /* Add software bits in status register */ 4245 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4246 IDEDMA_CTL(chp->channel), 4247 idedma_ctl); 4248 } 4249 pciide_print_modes(cp); 4250 } 4251 4252 /* 4253 * When the Silicon Image 3112 retries a PCI memory read command, 4254 * it may retry it as a memory read multiple command under some 4255 * circumstances. This can totally confuse some PCI controllers, 4256 * so ensure that it will never do this by making sure that the 4257 * Read Threshold (FIFO Read Request Control) field of the FIFO 4258 * Valid Byte Count and Control registers for both channels (BA5 4259 * offset 0x40 and 0x44) are set to be at least as large as the 4260 * cacheline size register. 4261 */ 4262 void 4263 sii_fixup_cacheline(struct pciide_softc *sc, struct pci_attach_args *pa) 4264 { 4265 pcireg_t cls, reg40, reg44; 4266 4267 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 4268 cls = (cls >> PCI_CACHELINE_SHIFT) & PCI_CACHELINE_MASK; 4269 cls *= 4; 4270 if (cls > 224) { 4271 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 4272 cls &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT); 4273 cls |= ((224/4) << PCI_CACHELINE_SHIFT); 4274 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, cls); 4275 cls = 224; 4276 } 4277 if (cls < 32) 4278 cls = 32; 4279 cls = (cls + 31) / 32; 4280 reg40 = ba5_read_4(sc, 0x40); 4281 reg44 = ba5_read_4(sc, 0x44); 4282 if ((reg40 & 0x7) < cls) 4283 ba5_write_4(sc, 0x40, (reg40 & ~0x07) | cls); 4284 if ((reg44 & 0x7) < cls) 4285 ba5_write_4(sc, 0x44, (reg44 & ~0x07) | cls); 4286 } 4287 4288 void 4289 sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4290 { 4291 struct pciide_channel *cp; 4292 bus_size_t cmdsize, ctlsize; 4293 pcireg_t interface, scs_cmd, cfgctl; 4294 int channel; 4295 struct pciide_satalink *sl; 4296 4297 /* Allocate memory for private data */ 4298 sc->sc_cookielen = sizeof(*sl); 4299 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO); 4300 sl = sc->sc_cookie; 4301 4302 sc->chip_unmap = default_chip_unmap; 4303 4304 #define SII3112_RESET_BITS \ 4305 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4306 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4307 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET) 4308 4309 /* 4310 * Reset everything and then unblock all of the interrupts. 
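 * The reset bits are asserted, the chip is given 50 ms to settle, and the bits are then cleared again, preserving only the BA5 enable setting.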
4311 */ 4312 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4313 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4314 scs_cmd | SII3112_RESET_BITS); 4315 delay(50 * 1000); 4316 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4317 scs_cmd & SCS_CMD_BA5_EN); 4318 delay(50 * 1000); 4319 4320 if (scs_cmd & SCS_CMD_BA5_EN) { 4321 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4322 PCI_MAPREG_TYPE_MEM | 4323 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4324 &sl->ba5_st, &sl->ba5_sh, 4325 NULL, NULL, 0) != 0) 4326 printf(": unable to map BA5 register space\n"); 4327 else 4328 sl->ba5_en = 1; 4329 } else { 4330 cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 4331 SII3112_PCI_CFGCTL); 4332 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL, 4333 cfgctl | CFGCTL_BA5INDEN); 4334 } 4335 4336 printf(": DMA"); 4337 pciide_mapreg_dma(sc, pa); 4338 printf("\n"); 4339 4340 /* 4341 * Rev. <= 0x01 of the 3112 have a bug that can cause data 4342 * corruption if DMA transfers cross an 8K boundary. This is 4343 * apparently hard to tickle, but we'll go ahead and play it 4344 * safe. 4345 */ 4346 if (sc->sc_rev <= 0x01) { 4347 sc->sc_dma_maxsegsz = 8192; 4348 sc->sc_dma_boundary = 8192; 4349 } 4350 4351 sii_fixup_cacheline(sc, pa); 4352 4353 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4354 sc->sc_wdcdev.PIO_cap = 4; 4355 if (sc->sc_dma_ok) { 4356 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4357 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4358 sc->sc_wdcdev.irqack = pciide_irqack; 4359 sc->sc_wdcdev.DMA_cap = 2; 4360 sc->sc_wdcdev.UDMA_cap = 6; 4361 } 4362 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4363 4364 /* We can use SControl and SStatus to probe for drives. */ 4365 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4366 4367 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4368 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4369 4370 /* 4371 * The 3112 either identifies itself as a RAID storage device 4372 * or a Misc storage device. Fake up the interface bits for 4373 * what our driver expects. 
4374 */ 4375 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4376 interface = PCI_INTERFACE(pa->pa_class); 4377 } else { 4378 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4379 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4380 } 4381 4382 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4383 cp = &sc->pciide_channels[channel]; 4384 if (pciide_chansetup(sc, channel, interface) == 0) 4385 continue; 4386 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4387 pciide_pci_intr); 4388 if (cp->hw_ok == 0) 4389 continue; 4390 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4391 } 4392 } 4393 4394 void 4395 sii3112_setup_channel(struct channel_softc *chp) 4396 { 4397 struct ata_drive_datas *drvp; 4398 int drive; 4399 u_int32_t idedma_ctl, dtm; 4400 struct pciide_channel *cp = (struct pciide_channel *)chp; 4401 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4402 4403 /* setup DMA if needed */ 4404 pciide_channel_dma_setup(cp); 4405 4406 idedma_ctl = 0; 4407 dtm = 0; 4408 4409 for (drive = 0; drive < 2; drive++) { 4410 drvp = &chp->ch_drive[drive]; 4411 /* If no drive, skip */ 4412 if ((drvp->drive_flags & DRIVE) == 0) 4413 continue; 4414 if (drvp->drive_flags & DRIVE_UDMA) { 4415 /* use Ultra/DMA */ 4416 drvp->drive_flags &= ~DRIVE_DMA; 4417 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4418 dtm |= DTM_IDEx_DMA; 4419 } else if (drvp->drive_flags & DRIVE_DMA) { 4420 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4421 dtm |= DTM_IDEx_DMA; 4422 } else { 4423 dtm |= DTM_IDEx_PIO; 4424 } 4425 } 4426 4427 /* 4428 * Nothing to do to setup modes; it is meaningless in S-ATA 4429 * (but many S-ATA drives still want to get the SET_FEATURE 4430 * command). 4431 */ 4432 if (idedma_ctl != 0) { 4433 /* Add software bits in status register */ 4434 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 4435 } 4436 BA5_WRITE_4(sc, chp->channel, ba5_IDE_DTM, dtm); 4437 pciide_print_modes(cp); 4438 } 4439 4440 void 4441 sii3112_drv_probe(struct channel_softc *chp) 4442 { 4443 struct pciide_channel *cp = (struct pciide_channel *)chp; 4444 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4445 uint32_t scontrol, sstatus; 4446 uint8_t scnt, sn, cl, ch; 4447 int s; 4448 4449 /* 4450 * The 3112 is a 2-port part, and only has one drive per channel 4451 * (each port emulates a master drive). 4452 * 4453 * The 3114 is similar, but has 4 channels. 4454 */ 4455 4456 /* 4457 * Request communication initialization sequence, any speed. 4458 * Performing this is the equivalent of an ATA Reset. 4459 */ 4460 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 4461 4462 /* 4463 * XXX We don't yet support SATA power management; disable all 4464 * power management state transitions. 4465 */ 4466 scontrol |= SControl_IPM_NONE; 4467 4468 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4469 delay(50 * 1000); 4470 scontrol &= ~SControl_DET_INIT; 4471 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4472 delay(50 * 1000); 4473 4474 sstatus = BA5_READ_4(sc, chp->channel, ba5_SStatus); 4475 #if 0 4476 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 4477 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 4478 BA5_READ_4(sc, chp->channel, ba5_SControl)); 4479 #endif 4480 switch (sstatus & SStatus_DET_mask) { 4481 case SStatus_DET_NODEV: 4482 /* No device; be silent. 
*/ 4483 break; 4484 4485 case SStatus_DET_DEV_NE: 4486 printf("%s: port %d: device connected, but " 4487 "communication not established\n", 4488 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4489 break; 4490 4491 case SStatus_DET_OFFLINE: 4492 printf("%s: port %d: PHY offline\n", 4493 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4494 break; 4495 4496 case SStatus_DET_DEV: 4497 /* 4498 * XXX ATAPI detection doesn't currently work. Don't 4499 * XXX know why. But, it's not like the standard method 4500 * XXX can detect an ATAPI device connected via a SATA/PATA 4501 * XXX bridge, so at least this is no worse. --thorpej 4502 */ 4503 if (chp->_vtbl != NULL) 4504 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 4505 else 4506 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 4507 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 4508 delay(10); /* 400ns delay */ 4509 /* Save register contents. */ 4510 if (chp->_vtbl != NULL) { 4511 scnt = CHP_READ_REG(chp, wdr_seccnt); 4512 sn = CHP_READ_REG(chp, wdr_sector); 4513 cl = CHP_READ_REG(chp, wdr_cyl_lo); 4514 ch = CHP_READ_REG(chp, wdr_cyl_hi); 4515 } else { 4516 scnt = bus_space_read_1(chp->cmd_iot, 4517 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 4518 sn = bus_space_read_1(chp->cmd_iot, 4519 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 4520 cl = bus_space_read_1(chp->cmd_iot, 4521 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 4522 ch = bus_space_read_1(chp->cmd_iot, 4523 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 4524 } 4525 #if 0 4526 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 4527 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 4528 scnt, sn, cl, ch); 4529 #endif 4530 /* 4531 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 4532 * cases we get wrong values here, so ignore it. 4533 */ 4534 s = splbio(); 4535 if (cl == 0x14 && ch == 0xeb) 4536 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 4537 else 4538 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 4539 splx(s); 4540 4541 printf("%s: port %d", 4542 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4543 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 4544 case 1: 4545 printf(": 1.5Gb/s"); 4546 break; 4547 case 2: 4548 printf(": 3.0Gb/s"); 4549 break; 4550 } 4551 printf("\n"); 4552 break; 4553 4554 default: 4555 printf("%s: port %d: unknown SStatus: 0x%08x\n", 4556 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 4557 } 4558 } 4559 4560 void 4561 sii3114_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4562 { 4563 struct pciide_channel *cp; 4564 pcireg_t scs_cmd; 4565 pci_intr_handle_t intrhandle; 4566 const char *intrstr; 4567 int channel; 4568 struct pciide_satalink *sl; 4569 4570 /* Allocate memory for private data */ 4571 sc->sc_cookielen = sizeof(*sl); 4572 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO); 4573 sl = sc->sc_cookie; 4574 4575 #define SII3114_RESET_BITS \ 4576 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4577 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4578 SCS_CMD_FF3_RESET | SCS_CMD_FF2_RESET | \ 4579 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET | \ 4580 SCS_CMD_IDE3_RESET | SCS_CMD_IDE2_RESET) 4581 4582 /* 4583 * Reset everything and then unblock all of the interrupts. 
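 * As with the 3112 above, the reset bits are pulsed for 50 ms and then cleared; here only the M66EN setting is preserved.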
4584 */ 4585 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4586 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4587 scs_cmd | SII3114_RESET_BITS); 4588 delay(50 * 1000); 4589 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4590 scs_cmd & SCS_CMD_M66EN); 4591 delay(50 * 1000); 4592 4593 /* 4594 * On the 3114, the BA5 register space is always enabled. In 4595 * order to use the 3114 in any sane way, we must use this BA5 4596 * register space, and so we consider it an error if we cannot 4597 * map it. 4598 * 4599 * As a consequence of using BA5, our register mapping is different 4600 * from a normal PCI IDE controller's, and so we are unable to use 4601 * most of the common PCI IDE register mapping functions. 4602 */ 4603 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4604 PCI_MAPREG_TYPE_MEM | 4605 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4606 &sl->ba5_st, &sl->ba5_sh, 4607 NULL, NULL, 0) != 0) { 4608 printf(": unable to map BA5 register space\n"); 4609 return; 4610 } 4611 sl->ba5_en = 1; 4612 4613 /* 4614 * Set the Interrupt Steering bit in the IDEDMA_CMD register of 4615 * channel 2. This is required at all times for proper operation 4616 * when using the BA5 register space (otherwise interrupts from 4617 * all 4 channels won't work). 4618 */ 4619 BA5_WRITE_4(sc, 2, ba5_IDEDMA_CMD, IDEDMA_CMD_INT_STEER); 4620 4621 printf(": DMA"); 4622 sii3114_mapreg_dma(sc, pa); 4623 printf("\n"); 4624 4625 sii_fixup_cacheline(sc, pa); 4626 4627 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4628 sc->sc_wdcdev.PIO_cap = 4; 4629 if (sc->sc_dma_ok) { 4630 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4631 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4632 sc->sc_wdcdev.irqack = pciide_irqack; 4633 sc->sc_wdcdev.DMA_cap = 2; 4634 sc->sc_wdcdev.UDMA_cap = 6; 4635 } 4636 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4637 4638 /* We can use SControl and SStatus to probe for drives. */ 4639 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4640 4641 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4642 sc->sc_wdcdev.nchannels = 4; 4643 4644 /* Map and establish the interrupt handler. */ 4645 if (pci_intr_map(pa, &intrhandle) != 0) { 4646 printf("%s: couldn't map native-PCI interrupt\n", 4647 sc->sc_wdcdev.sc_dev.dv_xname); 4648 return; 4649 } 4650 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 4651 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 4652 /* XXX */ 4653 pciide_pci_intr, sc, 4654 sc->sc_wdcdev.sc_dev.dv_xname); 4655 if (sc->sc_pci_ih != NULL) { 4656 printf("%s: using %s for native-PCI interrupt\n", 4657 sc->sc_wdcdev.sc_dev.dv_xname, 4658 intrstr ? 
intrstr : "unknown interrupt"); 4659 } else { 4660 printf("%s: couldn't establish native-PCI interrupt", 4661 sc->sc_wdcdev.sc_dev.dv_xname); 4662 if (intrstr != NULL) 4663 printf(" at %s", intrstr); 4664 printf("\n"); 4665 return; 4666 } 4667 4668 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4669 cp = &sc->pciide_channels[channel]; 4670 if (sii3114_chansetup(sc, channel) == 0) 4671 continue; 4672 sii3114_mapchan(cp); 4673 if (cp->hw_ok == 0) 4674 continue; 4675 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4676 } 4677 } 4678 4679 void 4680 sii3114_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 4681 { 4682 int chan, reg; 4683 bus_size_t size; 4684 struct pciide_satalink *sl = sc->sc_cookie; 4685 4686 sc->sc_wdcdev.dma_arg = sc; 4687 sc->sc_wdcdev.dma_init = pciide_dma_init; 4688 sc->sc_wdcdev.dma_start = pciide_dma_start; 4689 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 4690 4691 /* 4692 * Slice off a subregion of BA5 for each of the channel's DMA 4693 * registers. 4694 */ 4695 4696 sc->sc_dma_iot = sl->ba5_st; 4697 for (chan = 0; chan < 4; chan++) { 4698 for (reg = 0; reg < IDEDMA_NREGS; reg++) { 4699 size = 4; 4700 if (size > (IDEDMA_SCH_OFFSET - reg)) 4701 size = IDEDMA_SCH_OFFSET - reg; 4702 if (bus_space_subregion(sl->ba5_st, 4703 sl->ba5_sh, 4704 satalink_ba5_regmap[chan].ba5_IDEDMA_CMD + reg, 4705 size, &sl->regs[chan].dma_iohs[reg]) != 0) { 4706 sc->sc_dma_ok = 0; 4707 printf(": can't subregion offset " 4708 "%lu size %lu", 4709 (u_long) satalink_ba5_regmap[ 4710 chan].ba5_IDEDMA_CMD + reg, 4711 (u_long) size); 4712 return; 4713 } 4714 } 4715 } 4716 4717 sc->sc_dmacmd_read = sii3114_dmacmd_read; 4718 sc->sc_dmacmd_write = sii3114_dmacmd_write; 4719 sc->sc_dmactl_read = sii3114_dmactl_read; 4720 sc->sc_dmactl_write = sii3114_dmactl_write; 4721 sc->sc_dmatbl_write = sii3114_dmatbl_write; 4722 4723 /* DMA registers all set up! */ 4724 sc->sc_dmat = pa->pa_dmat; 4725 sc->sc_dma_ok = 1; 4726 } 4727 4728 int 4729 sii3114_chansetup(struct pciide_softc *sc, int channel) 4730 { 4731 static const char *channel_names[] = { 4732 "port 0", 4733 "port 1", 4734 "port 2", 4735 "port 3", 4736 }; 4737 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4738 4739 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4740 4741 /* 4742 * We must always keep the Interrupt Steering bit set in channel 2's 4743 * IDEDMA_CMD register. 
4744 */ 4745 if (channel == 2) 4746 cp->idedma_cmd = IDEDMA_CMD_INT_STEER; 4747 4748 cp->name = channel_names[channel]; 4749 cp->wdc_channel.channel = channel; 4750 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4751 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4752 if (cp->wdc_channel.ch_queue == NULL) { 4753 printf("%s %s channel: " 4754 "cannot allocate channel queue", 4755 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4756 return (0); 4757 } 4758 return (1); 4759 } 4760 4761 void 4762 sii3114_mapchan(struct pciide_channel *cp) 4763 { 4764 struct channel_softc *wdc_cp = &cp->wdc_channel; 4765 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4766 struct pciide_satalink *sl = sc->sc_cookie; 4767 int chan = wdc_cp->channel; 4768 int i; 4769 4770 cp->hw_ok = 0; 4771 cp->compat = 0; 4772 cp->ih = sc->sc_pci_ih; 4773 4774 sl->regs[chan].cmd_iot = sl->ba5_st; 4775 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4776 satalink_ba5_regmap[chan].ba5_IDE_TF0, 4777 9, &sl->regs[chan].cmd_baseioh) != 0) { 4778 printf("%s: couldn't subregion %s cmd base\n", 4779 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4780 return; 4781 } 4782 4783 sl->regs[chan].ctl_iot = sl->ba5_st; 4784 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4785 satalink_ba5_regmap[chan].ba5_IDE_TF8, 4786 1, &cp->ctl_baseioh) != 0) { 4787 printf("%s: couldn't subregion %s ctl base\n", 4788 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4789 return; 4790 } 4791 sl->regs[chan].ctl_ioh = cp->ctl_baseioh; 4792 4793 for (i = 0; i < WDC_NREG; i++) { 4794 if (bus_space_subregion(sl->regs[chan].cmd_iot, 4795 sl->regs[chan].cmd_baseioh, 4796 i, i == 0 ? 4 : 1, 4797 &sl->regs[chan].cmd_iohs[i]) != 0) { 4798 printf("%s: couldn't subregion %s channel " 4799 "cmd regs\n", 4800 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4801 return; 4802 } 4803 } 4804 sl->regs[chan].cmd_iohs[wdr_status & _WDC_REGMASK] = 4805 sl->regs[chan].cmd_iohs[wdr_command & _WDC_REGMASK]; 4806 sl->regs[chan].cmd_iohs[wdr_features & _WDC_REGMASK] = 4807 sl->regs[chan].cmd_iohs[wdr_error & _WDC_REGMASK]; 4808 wdc_cp->data32iot = wdc_cp->cmd_iot = sl->regs[chan].cmd_iot; 4809 wdc_cp->data32ioh = wdc_cp->cmd_ioh = sl->regs[chan].cmd_iohs[0]; 4810 wdc_cp->_vtbl = &wdc_sii3114_vtbl; 4811 wdcattach(wdc_cp); 4812 cp->hw_ok = 1; 4813 } 4814 4815 u_int8_t 4816 sii3114_read_reg(struct channel_softc *chp, enum wdc_regs reg) 4817 { 4818 struct pciide_channel *cp = (struct pciide_channel *)chp; 4819 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4820 struct pciide_satalink *sl = sc->sc_cookie; 4821 4822 if (reg & _WDC_AUX) 4823 return (bus_space_read_1(sl->regs[chp->channel].ctl_iot, 4824 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 4825 else 4826 return (bus_space_read_1(sl->regs[chp->channel].cmd_iot, 4827 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0)); 4828 } 4829 4830 void 4831 sii3114_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 4832 { 4833 struct pciide_channel *cp = (struct pciide_channel *)chp; 4834 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4835 struct pciide_satalink *sl = sc->sc_cookie; 4836 4837 if (reg & _WDC_AUX) 4838 bus_space_write_1(sl->regs[chp->channel].ctl_iot, 4839 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 4840 else 4841 bus_space_write_1(sl->regs[chp->channel].cmd_iot, 4842 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 4843 0, val); 4844 } 4845 4846 u_int8_t 4847 sii3114_dmacmd_read(struct pciide_softc *sc, int chan) 4848 { 4849 struct pciide_satalink 
*sl = sc->sc_cookie; 4850 4851 return (bus_space_read_1(sc->sc_dma_iot, 4852 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0)); 4853 } 4854 4855 void 4856 sii3114_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 4857 { 4858 struct pciide_satalink *sl = sc->sc_cookie; 4859 4860 bus_space_write_1(sc->sc_dma_iot, 4861 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, val); 4862 } 4863 4864 u_int8_t 4865 sii3114_dmactl_read(struct pciide_softc *sc, int chan) 4866 { 4867 struct pciide_satalink *sl = sc->sc_cookie; 4868 4869 return (bus_space_read_1(sc->sc_dma_iot, 4870 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0)); 4871 } 4872 4873 void 4874 sii3114_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 4875 { 4876 struct pciide_satalink *sl = sc->sc_cookie; 4877 4878 bus_space_write_1(sc->sc_dma_iot, 4879 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0, val); 4880 } 4881 4882 void 4883 sii3114_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 4884 { 4885 struct pciide_satalink *sl = sc->sc_cookie; 4886 4887 bus_space_write_4(sc->sc_dma_iot, 4888 sl->regs[chan].dma_iohs[IDEDMA_TBL(0)], 0, val); 4889 } 4890 4891 void 4892 cy693_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4893 { 4894 struct pciide_channel *cp; 4895 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4896 bus_size_t cmdsize, ctlsize; 4897 struct pciide_cy *cy; 4898 4899 /* Allocate memory for private data */ 4900 sc->sc_cookielen = sizeof(*cy); 4901 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO); 4902 cy = sc->sc_cookie; 4903 4904 /* 4905 * this chip has 2 PCI IDE functions, one for primary and one for 4906 * secondary. So we need to call pciide_mapregs_compat() with 4907 * the real channel 4908 */ 4909 if (pa->pa_function == 1) { 4910 cy->cy_compatchan = 0; 4911 } else if (pa->pa_function == 2) { 4912 cy->cy_compatchan = 1; 4913 } else { 4914 printf(": unexpected PCI function %d\n", pa->pa_function); 4915 return; 4916 } 4917 4918 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 4919 printf(": DMA"); 4920 pciide_mapreg_dma(sc, pa); 4921 } else { 4922 printf(": no DMA"); 4923 sc->sc_dma_ok = 0; 4924 } 4925 4926 cy->cy_handle = cy82c693_init(pa->pa_iot); 4927 if (cy->cy_handle == NULL) { 4928 printf(", (unable to map ctl registers)"); 4929 sc->sc_dma_ok = 0; 4930 } 4931 4932 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4933 WDC_CAPABILITY_MODE; 4934 if (sc->sc_dma_ok) { 4935 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4936 sc->sc_wdcdev.irqack = pciide_irqack; 4937 } 4938 sc->sc_wdcdev.PIO_cap = 4; 4939 sc->sc_wdcdev.DMA_cap = 2; 4940 sc->sc_wdcdev.set_modes = cy693_setup_channel; 4941 4942 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4943 sc->sc_wdcdev.nchannels = 1; 4944 4945 /* Only one channel for this chip; if we are here it's enabled */ 4946 cp = &sc->pciide_channels[0]; 4947 sc->wdc_chanarray[0] = &cp->wdc_channel; 4948 cp->name = PCIIDE_CHANNEL_NAME(0); 4949 cp->wdc_channel.channel = 0; 4950 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4951 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4952 if (cp->wdc_channel.ch_queue == NULL) { 4953 printf(": cannot allocate channel queue\n"); 4954 return; 4955 } 4956 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 4957 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 
4958 "configured" : "wired"); 4959 if (interface & PCIIDE_INTERFACE_PCI(0)) { 4960 printf("native-PCI\n"); 4961 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 4962 pciide_pci_intr); 4963 } else { 4964 printf("compatibility\n"); 4965 cp->hw_ok = pciide_mapregs_compat(pa, cp, cy->cy_compatchan, 4966 &cmdsize, &ctlsize); 4967 } 4968 4969 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4970 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4971 pciide_map_compat_intr(pa, cp, cy->cy_compatchan, interface); 4972 if (cp->hw_ok == 0) 4973 return; 4974 wdcattach(&cp->wdc_channel); 4975 if (pciide_chan_candisable(cp)) { 4976 pci_conf_write(sc->sc_pc, sc->sc_tag, 4977 PCI_COMMAND_STATUS_REG, 0); 4978 } 4979 if (cp->hw_ok == 0) { 4980 pciide_unmap_compat_intr(pa, cp, cy->cy_compatchan, 4981 interface); 4982 return; 4983 } 4984 4985 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 4986 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4987 cy693_setup_channel(&cp->wdc_channel); 4988 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 4989 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4990 } 4991 4992 void 4993 cy693_setup_channel(struct channel_softc *chp) 4994 { 4995 struct ata_drive_datas *drvp; 4996 int drive; 4997 u_int32_t cy_cmd_ctrl; 4998 u_int32_t idedma_ctl; 4999 struct pciide_channel *cp = (struct pciide_channel *)chp; 5000 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5001 int dma_mode = -1; 5002 struct pciide_cy *cy = sc->sc_cookie; 5003 5004 cy_cmd_ctrl = idedma_ctl = 0; 5005 5006 /* setup DMA if needed */ 5007 pciide_channel_dma_setup(cp); 5008 5009 for (drive = 0; drive < 2; drive++) { 5010 drvp = &chp->ch_drive[drive]; 5011 /* If no drive, skip */ 5012 if ((drvp->drive_flags & DRIVE) == 0) 5013 continue; 5014 /* add timing values, setup DMA if needed */ 5015 if (drvp->drive_flags & DRIVE_DMA) { 5016 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5017 /* use Multiword DMA */ 5018 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 5019 dma_mode = drvp->DMA_mode; 5020 } 5021 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 5022 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 5023 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 5024 CY_CMD_CTRL_IOW_REC_OFF(drive)); 5025 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 5026 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 5027 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 5028 CY_CMD_CTRL_IOR_REC_OFF(drive)); 5029 } 5030 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 5031 chp->ch_drive[0].DMA_mode = dma_mode; 5032 chp->ch_drive[1].DMA_mode = dma_mode; 5033 5034 if (dma_mode == -1) 5035 dma_mode = 0; 5036 5037 if (cy->cy_handle != NULL) { 5038 /* Note: `multiple' is implied. */ 5039 cy82c693_write(cy->cy_handle, 5040 (cy->cy_compatchan == 0) ? 
5041 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 5042 } 5043 5044 pciide_print_modes(cp); 5045 5046 if (idedma_ctl != 0) { 5047 /* Add software bits in status register */ 5048 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5049 IDEDMA_CTL(chp->channel), idedma_ctl); 5050 } 5051 } 5052 5053 static struct sis_hostbr_type { 5054 u_int16_t id; 5055 u_int8_t rev; 5056 u_int8_t udma_mode; 5057 char *name; 5058 u_int8_t type; 5059 #define SIS_TYPE_NOUDMA 0 5060 #define SIS_TYPE_66 1 5061 #define SIS_TYPE_100OLD 2 5062 #define SIS_TYPE_100NEW 3 5063 #define SIS_TYPE_133OLD 4 5064 #define SIS_TYPE_133NEW 5 5065 #define SIS_TYPE_SOUTH 6 5066 } sis_hostbr_type[] = { 5067 /* Most infos here are from sos@freebsd.org */ 5068 {PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66}, 5069 #if 0 5070 /* 5071 * controllers associated to a rev 0x2 530 Host to PCI Bridge 5072 * have problems with UDMA (info provided by Christos) 5073 */ 5074 {PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA}, 5075 #endif 5076 {PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66}, 5077 {PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66}, 5078 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66}, 5079 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66}, 5080 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW}, 5081 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW}, 5082 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW}, 5083 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH}, 5084 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH}, 5085 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH}, 5086 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH}, 5087 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH}, 5088 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH}, 5089 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH}, 5090 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH}, 5091 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH}, 5092 {PCI_PRODUCT_SIS_661, 0x00, 6, "661", SIS_TYPE_SOUTH}, 5093 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD}, 5094 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW}, 5095 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW}, 5096 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH}, 5097 {PCI_PRODUCT_SIS_741, 0x00, 6, "741", SIS_TYPE_SOUTH}, 5098 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW}, 5099 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH}, 5100 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH}, 5101 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH}, 5102 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH}, 5103 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH}, 5104 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH}, 5105 {PCI_PRODUCT_SIS_760, 0x00, 6, "760", SIS_TYPE_SOUTH}, 5106 /* 5107 * From sos@freebsd.org: the 0x961 ID will never be found in real world 5108 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW}, 5109 */ 5110 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW}, 5111 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW}, 5112 {PCI_PRODUCT_SIS_964, 0x00, 6, "964", SIS_TYPE_133NEW}, 5113 {PCI_PRODUCT_SIS_965, 0x00, 6, "965", SIS_TYPE_133NEW}, 5114 {PCI_PRODUCT_SIS_966, 0x00, 6, "966", SIS_TYPE_133NEW}, 5115 {PCI_PRODUCT_SIS_968, 0x00, 6, "968", SIS_TYPE_133NEW} 5116 }; 5117 5118 static struct sis_hostbr_type *sis_hostbr_type_match; 5119 5120 int 5121 sis_hostbr_match(struct pci_attach_args *pa) 5122 { 5123 int i; 5124 5125 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS) 5126 
return (0); 5127 sis_hostbr_type_match = NULL; 5128 for (i = 0; 5129 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]); 5130 i++) { 5131 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id && 5132 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev) 5133 sis_hostbr_type_match = &sis_hostbr_type[i]; 5134 } 5135 return (sis_hostbr_type_match != NULL); 5136 } 5137 5138 int 5139 sis_south_match(struct pci_attach_args *pa) 5140 { 5141 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS && 5142 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 && 5143 PCI_REVISION(pa->pa_class) >= 0x10); 5144 } 5145 5146 void 5147 sis_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5148 { 5149 struct pciide_channel *cp; 5150 int channel; 5151 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 5152 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 5153 int rev = sc->sc_rev; 5154 bus_size_t cmdsize, ctlsize; 5155 struct pciide_sis *sis; 5156 5157 /* Allocate memory for private data */ 5158 sc->sc_cookielen = sizeof(*sis); 5159 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO); 5160 sis = sc->sc_cookie; 5161 5162 pci_find_device(NULL, sis_hostbr_match); 5163 5164 if (sis_hostbr_type_match) { 5165 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) { 5166 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57, 5167 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5168 SIS_REG_57) & 0x7f); 5169 if (sc->sc_pp->ide_product == SIS_PRODUCT_5518) { 5170 sis->sis_type = SIS_TYPE_133NEW; 5171 sc->sc_wdcdev.UDMA_cap = 5172 sis_hostbr_type_match->udma_mode; 5173 } else { 5174 if (pci_find_device(NULL, sis_south_match)) { 5175 sis->sis_type = SIS_TYPE_133OLD; 5176 sc->sc_wdcdev.UDMA_cap = 5177 sis_hostbr_type_match->udma_mode; 5178 } else { 5179 sis->sis_type = SIS_TYPE_100NEW; 5180 sc->sc_wdcdev.UDMA_cap = 5181 sis_hostbr_type_match->udma_mode; 5182 } 5183 } 5184 } else { 5185 sis->sis_type = sis_hostbr_type_match->type; 5186 sc->sc_wdcdev.UDMA_cap = 5187 sis_hostbr_type_match->udma_mode; 5188 } 5189 printf(": %s", sis_hostbr_type_match->name); 5190 } else { 5191 printf(": 5597/5598"); 5192 if (rev >= 0xd0) { 5193 sc->sc_wdcdev.UDMA_cap = 2; 5194 sis->sis_type = SIS_TYPE_66; 5195 } else { 5196 sc->sc_wdcdev.UDMA_cap = 0; 5197 sis->sis_type = SIS_TYPE_NOUDMA; 5198 } 5199 } 5200 5201 printf(": DMA"); 5202 pciide_mapreg_dma(sc, pa); 5203 5204 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5205 WDC_CAPABILITY_MODE; 5206 if (sc->sc_dma_ok) { 5207 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5208 sc->sc_wdcdev.irqack = pciide_irqack; 5209 if (sis->sis_type >= SIS_TYPE_66) 5210 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5211 } 5212 5213 sc->sc_wdcdev.PIO_cap = 4; 5214 sc->sc_wdcdev.DMA_cap = 2; 5215 5216 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5217 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5218 switch (sis->sis_type) { 5219 case SIS_TYPE_NOUDMA: 5220 case SIS_TYPE_66: 5221 case SIS_TYPE_100OLD: 5222 sc->sc_wdcdev.set_modes = sis_setup_channel; 5223 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 5224 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 5225 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC); 5226 break; 5227 case SIS_TYPE_100NEW: 5228 case SIS_TYPE_133OLD: 5229 sc->sc_wdcdev.set_modes = sis_setup_channel; 5230 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49, 5231 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01); 5232 break; 5233 case SIS_TYPE_133NEW: 5234 sc->sc_wdcdev.set_modes = sis96x_setup_channel; 5235 
pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 5236 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 5237 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 5238 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 5239 break; 5240 } 5241 5242 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5243 5244 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5245 cp = &sc->pciide_channels[channel]; 5246 if (pciide_chansetup(sc, channel, interface) == 0) 5247 continue; 5248 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 5249 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 5250 printf("%s: %s ignored (disabled)\n", 5251 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5252 cp->hw_ok = 0; 5253 continue; 5254 } 5255 pciide_map_compat_intr(pa, cp, channel, interface); 5256 if (cp->hw_ok == 0) 5257 continue; 5258 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5259 pciide_pci_intr); 5260 if (cp->hw_ok == 0) { 5261 pciide_unmap_compat_intr(pa, cp, channel, interface); 5262 continue; 5263 } 5264 if (pciide_chan_candisable(cp)) { 5265 if (channel == 0) 5266 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 5267 else 5268 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 5269 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 5270 sis_ctr0); 5271 } 5272 if (cp->hw_ok == 0) { 5273 pciide_unmap_compat_intr(pa, cp, channel, interface); 5274 continue; 5275 } 5276 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5277 } 5278 } 5279 5280 void 5281 sis96x_setup_channel(struct channel_softc *chp) 5282 { 5283 struct ata_drive_datas *drvp; 5284 int drive; 5285 u_int32_t sis_tim; 5286 u_int32_t idedma_ctl; 5287 int regtim; 5288 struct pciide_channel *cp = (struct pciide_channel *)chp; 5289 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5290 5291 sis_tim = 0; 5292 idedma_ctl = 0; 5293 /* setup DMA if needed */ 5294 pciide_channel_dma_setup(cp); 5295 5296 for (drive = 0; drive < 2; drive++) { 5297 regtim = SIS_TIM133( 5298 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 5299 chp->channel, drive); 5300 drvp = &chp->ch_drive[drive]; 5301 /* If no drive, skip */ 5302 if ((drvp->drive_flags & DRIVE) == 0) 5303 continue; 5304 /* add timing values, setup DMA if needed */ 5305 if (drvp->drive_flags & DRIVE_UDMA) { 5306 /* use Ultra/DMA */ 5307 drvp->drive_flags &= ~DRIVE_DMA; 5308 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5309 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 5310 if (drvp->UDMA_mode > 2) 5311 drvp->UDMA_mode = 2; 5312 } 5313 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 5314 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5315 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5316 } else if (drvp->drive_flags & DRIVE_DMA) { 5317 /* 5318 * use Multiword DMA 5319 * Timings will be used for both PIO and DMA, 5320 * so adjust DMA mode if needed 5321 */ 5322 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5323 drvp->PIO_mode = drvp->DMA_mode + 2; 5324 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5325 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5326 drvp->PIO_mode - 2 : 0; 5327 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 5328 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5329 } else { 5330 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5331 } 5332 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 5333 "channel %d drive %d: 0x%x (reg 0x%x)\n", 5334 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 5335 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 5336 } 5337 if (idedma_ctl != 0) { 5338 /* Add software bits in status register */ 5339 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5340 IDEDMA_CTL(chp->channel), idedma_ctl); 5341 } 5342 pciide_print_modes(cp); 5343 } 5344 5345 void 5346 sis_setup_channel(struct channel_softc *chp) 5347 { 5348 struct ata_drive_datas *drvp; 5349 int drive; 5350 u_int32_t sis_tim; 5351 u_int32_t idedma_ctl; 5352 struct pciide_channel *cp = (struct pciide_channel *)chp; 5353 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5354 struct pciide_sis *sis = sc->sc_cookie; 5355 5356 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 5357 "channel %d 0x%x\n", chp->channel, 5358 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 5359 DEBUG_PROBE); 5360 sis_tim = 0; 5361 idedma_ctl = 0; 5362 /* setup DMA if needed */ 5363 pciide_channel_dma_setup(cp); 5364 5365 for (drive = 0; drive < 2; drive++) { 5366 drvp = &chp->ch_drive[drive]; 5367 /* If no drive, skip */ 5368 if ((drvp->drive_flags & DRIVE) == 0) 5369 continue; 5370 /* add timing values, setup DMA if needed */ 5371 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5372 (drvp->drive_flags & DRIVE_UDMA) == 0) 5373 goto pio; 5374 5375 if (drvp->drive_flags & DRIVE_UDMA) { 5376 /* use Ultra/DMA */ 5377 drvp->drive_flags &= ~DRIVE_DMA; 5378 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5379 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 5380 if (drvp->UDMA_mode > 2) 5381 drvp->UDMA_mode = 2; 5382 } 5383 switch (sis->sis_type) { 5384 case SIS_TYPE_66: 5385 case SIS_TYPE_100OLD: 5386 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 5387 SIS_TIM66_UDMA_TIME_OFF(drive); 5388 break; 5389 case SIS_TYPE_100NEW: 5390 sis_tim |= 5391 sis_udma100new_tim[drvp->UDMA_mode] << 5392 SIS_TIM100_UDMA_TIME_OFF(drive); 5393 break; 5394 case SIS_TYPE_133OLD: 5395 sis_tim |= 5396 sis_udma133old_tim[drvp->UDMA_mode] << 5397 SIS_TIM100_UDMA_TIME_OFF(drive); 5398 break; 5399 default: 5400 printf("unknown SiS IDE type %d\n", 5401 sis->sis_type); 5402 } 5403 } else { 5404 /* 5405 * use Multiword DMA 5406 * Timings will be used for both PIO and DMA, 5407 * so adjust DMA mode if needed 5408 */ 5409 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5410 drvp->PIO_mode = drvp->DMA_mode + 2; 5411 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5412 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5413 drvp->PIO_mode - 2 : 0; 5414 if (drvp->DMA_mode == 0) 5415 drvp->PIO_mode = 0; 5416 } 5417 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5418 pio: switch (sis->sis_type) { 5419 case SIS_TYPE_NOUDMA: 5420 case SIS_TYPE_66: 5421 case SIS_TYPE_100OLD: 5422 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5423 SIS_TIM66_ACT_OFF(drive); 5424 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5425 SIS_TIM66_REC_OFF(drive); 5426 break; 5427 case SIS_TYPE_100NEW: 5428 case SIS_TYPE_133OLD: 5429 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5430 SIS_TIM100_ACT_OFF(drive); 5431 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5432 SIS_TIM100_REC_OFF(drive); 5433 break; 5434 default: 5435 printf("unknown SiS IDE type %d\n", 5436 sis->sis_type); 5437 } 5438 } 5439 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 5440 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 5441 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 5442 if (idedma_ctl != 0) { 5443 /* Add software bits in status register */ 5444 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5445 IDEDMA_CTL(chp->channel), idedma_ctl); 5446 } 5447 pciide_print_modes(cp); 5448 } 5449 5450 void 5451 natsemi_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5452 { 5453 struct pciide_channel *cp; 5454 int channel; 5455 pcireg_t interface, ctl; 5456 bus_size_t cmdsize, ctlsize; 5457 5458 printf(": DMA"); 5459 pciide_mapreg_dma(sc, pa); 5460 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 5461 5462 if (sc->sc_dma_ok) { 5463 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5464 sc->sc_wdcdev.irqack = natsemi_irqack; 5465 } 5466 5467 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 5468 5469 /* 5470 * Mask off interrupts from both channels, appropriate channel(s) 5471 * will be unmasked later. 5472 */ 5473 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5474 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 5475 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 5476 5477 sc->sc_wdcdev.PIO_cap = 4; 5478 sc->sc_wdcdev.DMA_cap = 2; 5479 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 5480 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5481 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5482 5483 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5484 PCI_CLASS_REG)); 5485 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 5486 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5487 5488 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
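In compatibility mode the channels interrupt through the legacy ISA IRQs instead, so INTA stays masked.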
*/ 5489 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 5490 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 5491 ctl &= ~NATSEMI_CTRL1_INTAMASK; 5492 else 5493 ctl |= NATSEMI_CTRL1_INTAMASK; 5494 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 5495 5496 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5497 cp = &sc->pciide_channels[channel]; 5498 if (pciide_chansetup(sc, channel, interface) == 0) 5499 continue; 5500 5501 pciide_map_compat_intr(pa, cp, channel, interface); 5502 if (cp->hw_ok == 0) 5503 continue; 5504 5505 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5506 natsemi_pci_intr); 5507 if (cp->hw_ok == 0) { 5508 pciide_unmap_compat_intr(pa, cp, channel, interface); 5509 continue; 5510 } 5511 natsemi_setup_channel(&cp->wdc_channel); 5512 } 5513 } 5514 5515 void 5516 natsemi_setup_channel(struct channel_softc *chp) 5517 { 5518 struct ata_drive_datas *drvp; 5519 int drive, ndrives = 0; 5520 u_int32_t idedma_ctl = 0; 5521 struct pciide_channel *cp = (struct pciide_channel *)chp; 5522 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5523 u_int8_t tim; 5524 5525 /* setup DMA if needed */ 5526 pciide_channel_dma_setup(cp); 5527 5528 for (drive = 0; drive < 2; drive++) { 5529 drvp = &chp->ch_drive[drive]; 5530 /* If no drive, skip */ 5531 if ((drvp->drive_flags & DRIVE) == 0) 5532 continue; 5533 5534 ndrives++; 5535 /* add timing values, setup DMA if needed */ 5536 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 5537 tim = natsemi_pio_pulse[drvp->PIO_mode] | 5538 (natsemi_pio_recover[drvp->PIO_mode] << 4); 5539 } else { 5540 /* 5541 * use Multiword DMA 5542 * Timings will be used for both PIO and DMA, 5543 * so adjust DMA mode if needed 5544 */ 5545 if (drvp->PIO_mode >= 3 && 5546 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5547 drvp->DMA_mode = drvp->PIO_mode - 2; 5548 } 5549 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5550 tim = natsemi_dma_pulse[drvp->DMA_mode] | 5551 (natsemi_dma_recover[drvp->DMA_mode] << 4); 5552 } 5553 5554 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5555 NATSEMI_RTREG(chp->channel, drive), tim); 5556 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5557 NATSEMI_WTREG(chp->channel, drive), tim); 5558 } 5559 if (idedma_ctl != 0) { 5560 /* Add software bits in status register */ 5561 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5562 IDEDMA_CTL(chp->channel), idedma_ctl); 5563 } 5564 if (ndrives > 0) { 5565 /* Unmask the channel if at least one drive is found */ 5566 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5567 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 5568 ~(NATSEMI_CHMASK(chp->channel))); 5569 } 5570 5571 pciide_print_modes(cp); 5572 5573 /* Go ahead and ack interrupts generated during probe. 
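* The status bits are write-one-to-clear, so writing back the value
* just read acks anything that is pending.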
*/
5574 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5575 IDEDMA_CTL(chp->channel),
5576 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5577 IDEDMA_CTL(chp->channel)));
5578 }
5579
5580 void
5581 natsemi_irqack(struct channel_softc *chp)
5582 {
5583 struct pciide_channel *cp = (struct pciide_channel *)chp;
5584 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5585 u_int8_t clr;
5586
5587 /* The "clear" bits are in the wrong register *sigh* */
5588 clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5589 IDEDMA_CMD(chp->channel));
5590 clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5591 IDEDMA_CTL(chp->channel)) &
5592 (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR);
5593 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5594 IDEDMA_CMD(chp->channel), clr);
5595 }
5596
5597 int
5598 natsemi_pci_intr(void *arg)
5599 {
5600 struct pciide_softc *sc = arg;
5601 struct pciide_channel *cp;
5602 struct channel_softc *wdc_cp;
5603 int i, rv, crv;
5604 u_int8_t msk;
5605
5606 rv = 0;
5607 msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2);
5608 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5609 cp = &sc->pciide_channels[i];
5610 wdc_cp = &cp->wdc_channel;
5611
5612 /* If a compat channel, skip. */
5613 if (cp->compat)
5614 continue;
5615
5616 /* If this channel is masked, skip it. */
5617 if (msk & NATSEMI_CHMASK(i))
5618 continue;
5619
5620 if (pciide_intr_flag(cp) == 0)
5621 continue;
5622
5623 crv = wdcintr(wdc_cp);
5624 if (crv == 0)
5625 ; /* leave rv alone */
5626 else if (crv == 1)
5627 rv = 1; /* claim the intr */
5628 else if (rv == 0) /* crv should be -1 in this case */
5629 rv = crv; /* if we've done no better, take it */
5630 }
5631 return (rv);
5632 }
5633
5634 void
5635 ns_scx200_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
5636 {
5637 struct pciide_channel *cp;
5638 int channel;
5639 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
5640 bus_size_t cmdsize, ctlsize;
5641
5642 printf(": DMA");
5643 pciide_mapreg_dma(sc, pa);
5644
5645 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5646 WDC_CAPABILITY_MODE;
5647 if (sc->sc_dma_ok) {
5648 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5649 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5650 sc->sc_wdcdev.irqack = pciide_irqack;
5651 }
5652 sc->sc_wdcdev.PIO_cap = 4;
5653 sc->sc_wdcdev.DMA_cap = 2;
5654 sc->sc_wdcdev.UDMA_cap = 2;
5655
5656 sc->sc_wdcdev.set_modes = ns_scx200_setup_channel;
5657 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5658 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5659
5660 /*
5661 * Soekris net4801 errata 0003:
5662 *
5663 * The SC1100 built-in busmaster IDE controller is pretty standard,
5664 * but has two bugs: data transfers need to be dword aligned and
5665 * it cannot do an exact 64Kbyte data transfer.
5666 *
5667 * Assume that reducing maximum segment size by one page
5668 * will be enough, and restrict boundary too for extra certainty.
5669 */
5670 if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SCx200_IDE) {
5671 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE;
5672 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE;
5673 }
5674
5675 /*
5676 * This chip seems to be unable to do one-sector transfers
5677 * using DMA.
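* The WDC_QUIRK_NOSHORTDMA flag set below makes the wdc layer avoid
* DMA for such short transfers.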
5678 */ 5679 sc->sc_wdcdev.quirks = WDC_QUIRK_NOSHORTDMA; 5680 5681 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5682 5683 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5684 cp = &sc->pciide_channels[channel]; 5685 if (pciide_chansetup(sc, channel, interface) == 0) 5686 continue; 5687 pciide_map_compat_intr(pa, cp, channel, interface); 5688 if (cp->hw_ok == 0) 5689 continue; 5690 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5691 pciide_pci_intr); 5692 if (cp->hw_ok == 0) { 5693 pciide_unmap_compat_intr(pa, cp, channel, interface); 5694 continue; 5695 } 5696 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5697 } 5698 } 5699 5700 void 5701 ns_scx200_setup_channel(struct channel_softc *chp) 5702 { 5703 struct ata_drive_datas *drvp; 5704 int drive, mode; 5705 u_int32_t idedma_ctl; 5706 struct pciide_channel *cp = (struct pciide_channel*)chp; 5707 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5708 int channel = chp->channel; 5709 int pioformat; 5710 pcireg_t piotim, dmatim; 5711 5712 /* Setup DMA if needed */ 5713 pciide_channel_dma_setup(cp); 5714 5715 idedma_ctl = 0; 5716 5717 pioformat = (pci_conf_read(sc->sc_pc, sc->sc_tag, 5718 SCx200_TIM_DMA(0, 0)) >> SCx200_PIOFORMAT_SHIFT) & 0x01; 5719 WDCDEBUG_PRINT(("%s: pio format %d\n", __func__, pioformat), 5720 DEBUG_PROBE); 5721 5722 /* Per channel settings */ 5723 for (drive = 0; drive < 2; drive++) { 5724 drvp = &chp->ch_drive[drive]; 5725 5726 /* If no drive, skip */ 5727 if ((drvp->drive_flags & DRIVE) == 0) 5728 continue; 5729 5730 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5731 SCx200_TIM_PIO(channel, drive)); 5732 dmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5733 SCx200_TIM_DMA(channel, drive)); 5734 WDCDEBUG_PRINT(("%s:%d:%d: piotim=0x%x, dmatim=0x%x\n", 5735 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5736 piotim, dmatim), DEBUG_PROBE); 5737 5738 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 5739 (drvp->drive_flags & DRIVE_UDMA) != 0) { 5740 /* Setup UltraDMA mode */ 5741 drvp->drive_flags &= ~DRIVE_DMA; 5742 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5743 dmatim = scx200_udma33[drvp->UDMA_mode]; 5744 mode = drvp->PIO_mode; 5745 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 5746 (drvp->drive_flags & DRIVE_DMA) != 0) { 5747 /* Setup multiword DMA mode */ 5748 drvp->drive_flags &= ~DRIVE_UDMA; 5749 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5750 dmatim = scx200_dma33[drvp->DMA_mode]; 5751 5752 /* mode = min(pio, dma + 2) */ 5753 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 5754 mode = drvp->PIO_mode; 5755 else 5756 mode = drvp->DMA_mode + 2; 5757 } else { 5758 mode = drvp->PIO_mode; 5759 } 5760 5761 /* Setup PIO mode */ 5762 drvp->PIO_mode = mode; 5763 if (mode < 2) 5764 drvp->DMA_mode = 0; 5765 else 5766 drvp->DMA_mode = mode - 2; 5767 5768 piotim = scx200_pio33[pioformat][drvp->PIO_mode]; 5769 5770 WDCDEBUG_PRINT(("%s:%d:%d: new piotim=0x%x, dmatim=0x%x\n", 5771 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5772 piotim, dmatim), DEBUG_PROBE); 5773 5774 pci_conf_write(sc->sc_pc, sc->sc_tag, 5775 SCx200_TIM_PIO(channel, drive), piotim); 5776 pci_conf_write(sc->sc_pc, sc->sc_tag, 5777 SCx200_TIM_DMA(channel, drive), dmatim); 5778 } 5779 5780 if (idedma_ctl != 0) { 5781 /* Add software bits in status register */ 5782 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5783 IDEDMA_CTL(channel), idedma_ctl); 5784 } 5785 5786 pciide_print_modes(cp); 5787 } 5788 5789 void 5790 acer_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5791 { 5792 struct 
pciide_channel *cp; 5793 int channel; 5794 pcireg_t cr, interface; 5795 bus_size_t cmdsize, ctlsize; 5796 int rev = sc->sc_rev; 5797 5798 printf(": DMA"); 5799 pciide_mapreg_dma(sc, pa); 5800 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5801 WDC_CAPABILITY_MODE; 5802 5803 if (sc->sc_dma_ok) { 5804 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 5805 if (rev >= 0x20) { 5806 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5807 if (rev >= 0xC4) 5808 sc->sc_wdcdev.UDMA_cap = 5; 5809 else if (rev >= 0xC2) 5810 sc->sc_wdcdev.UDMA_cap = 4; 5811 else 5812 sc->sc_wdcdev.UDMA_cap = 2; 5813 } 5814 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5815 sc->sc_wdcdev.irqack = pciide_irqack; 5816 if (rev <= 0xC4) 5817 sc->sc_wdcdev.dma_init = acer_dma_init; 5818 } 5819 5820 sc->sc_wdcdev.PIO_cap = 4; 5821 sc->sc_wdcdev.DMA_cap = 2; 5822 sc->sc_wdcdev.set_modes = acer_setup_channel; 5823 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5824 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5825 5826 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 5827 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 5828 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 5829 5830 /* Enable "microsoft register bits" R/W. */ 5831 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 5832 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 5833 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 5834 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 5835 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 5836 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 5837 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 5838 ~ACER_CHANSTATUSREGS_RO); 5839 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 5840 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 5841 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 5842 /* Don't use cr, re-read the real register content instead */ 5843 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5844 PCI_CLASS_REG)); 5845 5846 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5847 5848 /* From linux: enable "Cable Detection" */ 5849 if (rev >= 0xC2) 5850 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 5851 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 5852 | ACER_0x4B_CDETECT); 5853 5854 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5855 cp = &sc->pciide_channels[channel]; 5856 if (pciide_chansetup(sc, channel, interface) == 0) 5857 continue; 5858 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 5859 printf("%s: %s ignored (disabled)\n", 5860 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5861 cp->hw_ok = 0; 5862 continue; 5863 } 5864 pciide_map_compat_intr(pa, cp, channel, interface); 5865 if (cp->hw_ok == 0) 5866 continue; 5867 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5868 (rev >= 0xC2) ? 
pciide_pci_intr : acer_pci_intr); 5869 if (cp->hw_ok == 0) { 5870 pciide_unmap_compat_intr(pa, cp, channel, interface); 5871 continue; 5872 } 5873 if (pciide_chan_candisable(cp)) { 5874 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 5875 pci_conf_write(sc->sc_pc, sc->sc_tag, 5876 PCI_CLASS_REG, cr); 5877 } 5878 if (cp->hw_ok == 0) { 5879 pciide_unmap_compat_intr(pa, cp, channel, interface); 5880 continue; 5881 } 5882 acer_setup_channel(&cp->wdc_channel); 5883 } 5884 } 5885 5886 void 5887 acer_setup_channel(struct channel_softc *chp) 5888 { 5889 struct ata_drive_datas *drvp; 5890 int drive; 5891 u_int32_t acer_fifo_udma; 5892 u_int32_t idedma_ctl; 5893 struct pciide_channel *cp = (struct pciide_channel *)chp; 5894 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5895 5896 idedma_ctl = 0; 5897 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 5898 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 5899 acer_fifo_udma), DEBUG_PROBE); 5900 /* setup DMA if needed */ 5901 pciide_channel_dma_setup(cp); 5902 5903 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 5904 DRIVE_UDMA) { /* check 80 pins cable */ 5905 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 5906 ACER_0x4A_80PIN(chp->channel)) { 5907 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 5908 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 5909 DEBUG_PROBE); 5910 if (chp->ch_drive[0].UDMA_mode > 2) 5911 chp->ch_drive[0].UDMA_mode = 2; 5912 if (chp->ch_drive[1].UDMA_mode > 2) 5913 chp->ch_drive[1].UDMA_mode = 2; 5914 } 5915 } 5916 5917 for (drive = 0; drive < 2; drive++) { 5918 drvp = &chp->ch_drive[drive]; 5919 /* If no drive, skip */ 5920 if ((drvp->drive_flags & DRIVE) == 0) 5921 continue; 5922 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 5923 "channel %d drive %d 0x%x\n", chp->channel, drive, 5924 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5925 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 5926 /* clear FIFO/DMA mode */ 5927 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 5928 ACER_UDMA_EN(chp->channel, drive) | 5929 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 5930 5931 /* add timing values, setup DMA if needed */ 5932 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5933 (drvp->drive_flags & DRIVE_UDMA) == 0) { 5934 acer_fifo_udma |= 5935 ACER_FTH_OPL(chp->channel, drive, 0x1); 5936 goto pio; 5937 } 5938 5939 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 5940 if (drvp->drive_flags & DRIVE_UDMA) { 5941 /* use Ultra/DMA */ 5942 drvp->drive_flags &= ~DRIVE_DMA; 5943 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 5944 acer_fifo_udma |= 5945 ACER_UDMA_TIM(chp->channel, drive, 5946 acer_udma[drvp->UDMA_mode]); 5947 /* XXX disable if one drive < UDMA3 ? */ 5948 if (drvp->UDMA_mode >= 3) { 5949 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5950 ACER_0x4B, 5951 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5952 ACER_0x4B) | ACER_0x4B_UDMA66); 5953 } 5954 } else { 5955 /* 5956 * use Multiword DMA 5957 * Timings will be used for both PIO and DMA, 5958 * so adjust DMA mode if needed 5959 */ 5960 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5961 drvp->PIO_mode = drvp->DMA_mode + 2; 5962 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5963 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5964 drvp->PIO_mode - 2 : 0; 5965 if (drvp->DMA_mode == 0) 5966 drvp->PIO_mode = 0; 5967 } 5968 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5969 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 5970 ACER_IDETIM(chp->channel, drive), 5971 acer_pio[drvp->PIO_mode]); 5972 } 5973 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 5974 acer_fifo_udma), DEBUG_PROBE); 5975 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 5976 if (idedma_ctl != 0) { 5977 /* Add software bits in status register */ 5978 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5979 IDEDMA_CTL(chp->channel), idedma_ctl); 5980 } 5981 pciide_print_modes(cp); 5982 } 5983 5984 int 5985 acer_pci_intr(void *arg) 5986 { 5987 struct pciide_softc *sc = arg; 5988 struct pciide_channel *cp; 5989 struct channel_softc *wdc_cp; 5990 int i, rv, crv; 5991 u_int32_t chids; 5992 5993 rv = 0; 5994 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 5995 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5996 cp = &sc->pciide_channels[i]; 5997 wdc_cp = &cp->wdc_channel; 5998 /* If a compat channel skip. */ 5999 if (cp->compat) 6000 continue; 6001 if (chids & ACER_CHIDS_INT(i)) { 6002 crv = wdcintr(wdc_cp); 6003 if (crv == 0) 6004 printf("%s:%d: bogus intr\n", 6005 sc->sc_wdcdev.sc_dev.dv_xname, i); 6006 else 6007 rv = 1; 6008 } 6009 } 6010 return (rv); 6011 } 6012 6013 int 6014 acer_dma_init(void *v, int channel, int drive, void *databuf, 6015 size_t datalen, int flags) 6016 { 6017 /* Use PIO for LBA48 transfers. */ 6018 if (flags & WDC_DMA_LBA48) 6019 return (EINVAL); 6020 6021 return (pciide_dma_init(v, channel, drive, databuf, datalen, flags)); 6022 } 6023 6024 void 6025 hpt_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6026 { 6027 struct pciide_channel *cp; 6028 int i, compatchan, revision; 6029 pcireg_t interface; 6030 bus_size_t cmdsize, ctlsize; 6031 6032 revision = sc->sc_rev; 6033 6034 /* 6035 * when the chip is in native mode it identifies itself as a 6036 * 'misc mass storage'. Fake interface in this case. 6037 */ 6038 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 6039 interface = PCI_INTERFACE(pa->pa_class); 6040 } else { 6041 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 6042 PCIIDE_INTERFACE_PCI(0); 6043 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 6044 (revision == HPT370_REV || revision == HPT370A_REV || 6045 revision == HPT372_REV)) || 6046 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 6047 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 6048 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 6049 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 6050 interface |= PCIIDE_INTERFACE_PCI(1); 6051 } 6052 6053 printf(": DMA"); 6054 pciide_mapreg_dma(sc, pa); 6055 printf("\n"); 6056 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6057 WDC_CAPABILITY_MODE; 6058 if (sc->sc_dma_ok) { 6059 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6060 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6061 sc->sc_wdcdev.irqack = pciide_irqack; 6062 } 6063 sc->sc_wdcdev.PIO_cap = 4; 6064 sc->sc_wdcdev.DMA_cap = 2; 6065 6066 sc->sc_wdcdev.set_modes = hpt_setup_channel; 6067 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6068 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 6069 revision == HPT366_REV) { 6070 sc->sc_wdcdev.UDMA_cap = 4; 6071 /* 6072 * The 366 has 2 PCI IDE functions, one for primary and one 6073 * for secondary. 
So we need to call pciide_mapregs_compat()
6074 * with the real channel.
6075 */
6076 if (pa->pa_function == 0) {
6077 compatchan = 0;
6078 } else if (pa->pa_function == 1) {
6079 compatchan = 1;
6080 } else {
6081 printf("%s: unexpected PCI function %d\n",
6082 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
6083 return;
6084 }
6085 sc->sc_wdcdev.nchannels = 1;
6086 } else {
6087 sc->sc_wdcdev.nchannels = 2;
6088 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A ||
6089 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 ||
6090 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 ||
6091 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
6092 sc->sc_wdcdev.UDMA_cap = 6;
6093 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
6094 if (revision == HPT372_REV)
6095 sc->sc_wdcdev.UDMA_cap = 6;
6096 else
6097 sc->sc_wdcdev.UDMA_cap = 5;
6098 }
6099 }
6100 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
6101 cp = &sc->pciide_channels[i];
6102 compatchan = 0;
6103 if (sc->sc_wdcdev.nchannels > 1) {
6104 compatchan = i;
6105 if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
6106 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
6107 printf("%s: %s ignored (disabled)\n",
6108 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
6109 cp->hw_ok = 0;
6110 continue;
6111 }
6112 }
6113 if (pciide_chansetup(sc, i, interface) == 0)
6114 continue;
6115 if (interface & PCIIDE_INTERFACE_PCI(i)) {
6116 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
6117 &ctlsize, hpt_pci_intr);
6118 } else {
6119 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
6120 &cmdsize, &ctlsize);
6121 }
6122 if (cp->hw_ok == 0)
6123 return;
6124 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
6125 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
6126 wdcattach(&cp->wdc_channel);
6127 hpt_setup_channel(&cp->wdc_channel);
6128 }
6129 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
6130 (revision == HPT370_REV || revision == HPT370A_REV ||
6131 revision == HPT372_REV)) ||
6132 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A ||
6133 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 ||
6134 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 ||
6135 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
6136 /*
6137 * Turn off fast interrupts
6138 */
6139 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0),
6140 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) &
6141 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ));
6142 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1),
6143 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) &
6144 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ));
6145
6146 /*
6147 * HPT370 and higher have a bit to disable interrupts;
6148 * make sure to clear it.
6149 */
6150 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
6151 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
6152 ~HPT_CSEL_IRQDIS);
6153 }
6154 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
6155 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A ||
6156 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 ||
6157 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 ||
6158 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
6159 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
6160 revision == HPT372_REV))
6161 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
6162 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
6163 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
6164
6165 return;
6166 }
6167
6168 void
6169 hpt_setup_channel(struct channel_softc *chp)
6170 {
6171 struct
ata_drive_datas *drvp; 6172 int drive; 6173 int cable; 6174 u_int32_t before, after; 6175 u_int32_t idedma_ctl; 6176 struct pciide_channel *cp = (struct pciide_channel *)chp; 6177 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6178 int revision = sc->sc_rev; 6179 u_int32_t *tim_pio, *tim_dma, *tim_udma; 6180 6181 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 6182 6183 /* setup DMA if needed */ 6184 pciide_channel_dma_setup(cp); 6185 6186 idedma_ctl = 0; 6187 6188 switch (sc->sc_pp->ide_product) { 6189 case PCI_PRODUCT_TRIONES_HPT366: 6190 if (revision == HPT370_REV || 6191 revision == HPT370A_REV) { 6192 tim_pio = hpt370_pio; 6193 tim_dma = hpt370_dma; 6194 tim_udma = hpt370_udma; 6195 } else if (revision == HPT372_REV) { 6196 tim_pio = hpt372_pio; 6197 tim_dma = hpt372_dma; 6198 tim_udma = hpt372_udma; 6199 } else { 6200 tim_pio = hpt366_pio; 6201 tim_dma = hpt366_dma; 6202 tim_udma = hpt366_udma; 6203 } 6204 break; 6205 case PCI_PRODUCT_TRIONES_HPT372A: 6206 case PCI_PRODUCT_TRIONES_HPT302: 6207 case PCI_PRODUCT_TRIONES_HPT371: 6208 tim_pio = hpt372_pio; 6209 tim_dma = hpt372_dma; 6210 tim_udma = hpt372_udma; 6211 break; 6212 case PCI_PRODUCT_TRIONES_HPT374: 6213 tim_pio = hpt374_pio; 6214 tim_dma = hpt374_dma; 6215 tim_udma = hpt374_udma; 6216 break; 6217 default: 6218 printf("%s: no known timing values\n", 6219 sc->sc_wdcdev.sc_dev.dv_xname); 6220 goto end; 6221 } 6222 6223 /* Per drive settings */ 6224 for (drive = 0; drive < 2; drive++) { 6225 drvp = &chp->ch_drive[drive]; 6226 /* If no drive, skip */ 6227 if ((drvp->drive_flags & DRIVE) == 0) 6228 continue; 6229 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 6230 HPT_IDETIM(chp->channel, drive)); 6231 6232 /* add timing values, setup DMA if needed */ 6233 if (drvp->drive_flags & DRIVE_UDMA) { 6234 /* use Ultra/DMA */ 6235 drvp->drive_flags &= ~DRIVE_DMA; 6236 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 6237 drvp->UDMA_mode > 2) { 6238 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6239 "cable not detected\n", drvp->drive_name, 6240 sc->sc_wdcdev.sc_dev.dv_xname, 6241 chp->channel, drive), DEBUG_PROBE); 6242 drvp->UDMA_mode = 2; 6243 } 6244 after = tim_udma[drvp->UDMA_mode]; 6245 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6246 } else if (drvp->drive_flags & DRIVE_DMA) { 6247 /* 6248 * use Multiword DMA. 
6249 * Timings will be used for both PIO and DMA, so adjust 6250 * DMA mode if needed 6251 */ 6252 if (drvp->PIO_mode >= 3 && 6253 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 6254 drvp->DMA_mode = drvp->PIO_mode - 2; 6255 } 6256 after = tim_dma[drvp->DMA_mode]; 6257 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6258 } else { 6259 /* PIO only */ 6260 after = tim_pio[drvp->PIO_mode]; 6261 } 6262 pci_conf_write(sc->sc_pc, sc->sc_tag, 6263 HPT_IDETIM(chp->channel, drive), after); 6264 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 6265 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 6266 after, before), DEBUG_PROBE); 6267 } 6268 end: 6269 if (idedma_ctl != 0) { 6270 /* Add software bits in status register */ 6271 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6272 IDEDMA_CTL(chp->channel), idedma_ctl); 6273 } 6274 pciide_print_modes(cp); 6275 } 6276 6277 int 6278 hpt_pci_intr(void *arg) 6279 { 6280 struct pciide_softc *sc = arg; 6281 struct pciide_channel *cp; 6282 struct channel_softc *wdc_cp; 6283 int rv = 0; 6284 int dmastat, i, crv; 6285 6286 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6287 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6288 IDEDMA_CTL(i)); 6289 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 6290 IDEDMA_CTL_INTR) 6291 continue; 6292 cp = &sc->pciide_channels[i]; 6293 wdc_cp = &cp->wdc_channel; 6294 crv = wdcintr(wdc_cp); 6295 if (crv == 0) { 6296 printf("%s:%d: bogus intr\n", 6297 sc->sc_wdcdev.sc_dev.dv_xname, i); 6298 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6299 IDEDMA_CTL(i), dmastat); 6300 } else 6301 rv = 1; 6302 } 6303 return (rv); 6304 } 6305 6306 /* Macros to test product */ 6307 #define PDC_IS_262(sc) \ 6308 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 6309 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6310 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 6311 #define PDC_IS_265(sc) \ 6312 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6313 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 6314 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6315 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6316 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6317 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6318 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6319 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6320 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6321 #define PDC_IS_268(sc) \ 6322 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6323 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6324 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6325 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6326 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6327 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6328 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6329 #define PDC_IS_269(sc) \ 6330 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6331 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6332 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6333 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6334 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6335 6336 u_int8_t 6337 pdc268_config_read(struct channel_softc *chp, int index) 6338 { 6339 struct pciide_channel *cp = (struct pciide_channel *)chp; 6340 struct 
pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6341 int channel = chp->channel; 6342 6343 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6344 PDC268_INDEX(channel), index); 6345 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6346 PDC268_DATA(channel))); 6347 } 6348 6349 void 6350 pdc202xx_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6351 { 6352 struct pciide_channel *cp; 6353 int channel; 6354 pcireg_t interface, st, mode; 6355 bus_size_t cmdsize, ctlsize; 6356 6357 if (!PDC_IS_268(sc)) { 6358 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6359 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 6360 st), DEBUG_PROBE); 6361 } 6362 6363 /* turn off RAID mode */ 6364 if (!PDC_IS_268(sc)) 6365 st &= ~PDC2xx_STATE_IDERAID; 6366 6367 /* 6368 * can't rely on the PCI_CLASS_REG content if the chip was in raid 6369 * mode. We have to fake interface 6370 */ 6371 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 6372 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 6373 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 6374 6375 printf(": DMA"); 6376 pciide_mapreg_dma(sc, pa); 6377 6378 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6379 WDC_CAPABILITY_MODE; 6380 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 6381 PDC_IS_262(sc)) 6382 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 6383 if (sc->sc_dma_ok) { 6384 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6385 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6386 sc->sc_wdcdev.irqack = pciide_irqack; 6387 } 6388 sc->sc_wdcdev.PIO_cap = 4; 6389 sc->sc_wdcdev.DMA_cap = 2; 6390 if (PDC_IS_269(sc)) 6391 sc->sc_wdcdev.UDMA_cap = 6; 6392 else if (PDC_IS_265(sc)) 6393 sc->sc_wdcdev.UDMA_cap = 5; 6394 else if (PDC_IS_262(sc)) 6395 sc->sc_wdcdev.UDMA_cap = 4; 6396 else 6397 sc->sc_wdcdev.UDMA_cap = 2; 6398 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
6399 pdc20268_setup_channel : pdc202xx_setup_channel; 6400 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6401 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 6402 6403 if (PDC_IS_262(sc)) { 6404 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 6405 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 6406 } 6407 6408 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 6409 if (!PDC_IS_268(sc)) { 6410 /* setup failsafe defaults */ 6411 mode = 0; 6412 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 6413 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 6414 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 6415 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 6416 for (channel = 0; 6417 channel < sc->sc_wdcdev.nchannels; 6418 channel++) { 6419 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6420 "drive 0 initial timings 0x%x, now 0x%x\n", 6421 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6422 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 6423 DEBUG_PROBE); 6424 pci_conf_write(sc->sc_pc, sc->sc_tag, 6425 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 6426 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6427 "drive 1 initial timings 0x%x, now 0x%x\n", 6428 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6429 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 6430 pci_conf_write(sc->sc_pc, sc->sc_tag, 6431 PDC2xx_TIM(channel, 1), mode); 6432 } 6433 6434 mode = PDC2xx_SCR_DMA; 6435 if (PDC_IS_262(sc)) { 6436 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 6437 } else { 6438 /* the BIOS set it up this way */ 6439 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 6440 } 6441 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 6442 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 6443 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 6444 "now 0x%x\n", 6445 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6446 PDC2xx_SCR), 6447 mode), DEBUG_PROBE); 6448 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6449 PDC2xx_SCR, mode); 6450 6451 /* controller initial state register is OK even without BIOS */ 6452 /* Set DMA mode to IDE DMA compatibility */ 6453 mode = 6454 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 6455 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 6456 DEBUG_PROBE); 6457 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 6458 mode | 0x1); 6459 mode = 6460 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 6461 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 6462 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 6463 mode | 0x1); 6464 } 6465 6466 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6467 cp = &sc->pciide_channels[channel]; 6468 if (pciide_chansetup(sc, channel, interface) == 0) 6469 continue; 6470 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 6471 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 6472 printf("%s: %s ignored (disabled)\n", 6473 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6474 cp->hw_ok = 0; 6475 continue; 6476 } 6477 pciide_map_compat_intr(pa, cp, channel, interface); 6478 if (cp->hw_ok == 0) 6479 continue; 6480 if (PDC_IS_265(sc)) 6481 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6482 pdc20265_pci_intr); 6483 else 6484 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6485 pdc202xx_pci_intr); 6486 if (cp->hw_ok == 0) { 6487 pciide_unmap_compat_intr(pa, cp, channel, interface); 6488 continue; 6489 } 6490 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 6491 st &= ~(PDC_IS_262(sc) ? 
6492 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 6493 pciide_unmap_compat_intr(pa, cp, channel, interface); 6494 } 6495 if (PDC_IS_268(sc)) 6496 pdc20268_setup_channel(&cp->wdc_channel); 6497 else 6498 pdc202xx_setup_channel(&cp->wdc_channel); 6499 } 6500 if (!PDC_IS_268(sc)) { 6501 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 6502 "0x%x\n", st), DEBUG_PROBE); 6503 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 6504 } 6505 return; 6506 } 6507 6508 void 6509 pdc202xx_setup_channel(struct channel_softc *chp) 6510 { 6511 struct ata_drive_datas *drvp; 6512 int drive; 6513 pcireg_t mode, st; 6514 u_int32_t idedma_ctl, scr, atapi; 6515 struct pciide_channel *cp = (struct pciide_channel *)chp; 6516 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6517 int channel = chp->channel; 6518 6519 /* setup DMA if needed */ 6520 pciide_channel_dma_setup(cp); 6521 6522 idedma_ctl = 0; 6523 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 6524 sc->sc_wdcdev.sc_dev.dv_xname, 6525 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 6526 DEBUG_PROBE); 6527 6528 /* Per channel settings */ 6529 if (PDC_IS_262(sc)) { 6530 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6531 PDC262_U66); 6532 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6533 /* Check cable */ 6534 if ((st & PDC262_STATE_80P(channel)) != 0 && 6535 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6536 chp->ch_drive[0].UDMA_mode > 2) || 6537 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6538 chp->ch_drive[1].UDMA_mode > 2))) { 6539 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 6540 sc->sc_wdcdev.sc_dev.dv_xname, channel), 6541 DEBUG_PROBE); 6542 if (chp->ch_drive[0].UDMA_mode > 2) 6543 chp->ch_drive[0].UDMA_mode = 2; 6544 if (chp->ch_drive[1].UDMA_mode > 2) 6545 chp->ch_drive[1].UDMA_mode = 2; 6546 } 6547 /* Trim UDMA mode */ 6548 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6549 chp->ch_drive[0].UDMA_mode <= 2) || 6550 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6551 chp->ch_drive[1].UDMA_mode <= 2)) { 6552 if (chp->ch_drive[0].UDMA_mode > 2) 6553 chp->ch_drive[0].UDMA_mode = 2; 6554 if (chp->ch_drive[1].UDMA_mode > 2) 6555 chp->ch_drive[1].UDMA_mode = 2; 6556 } 6557 /* Set U66 if needed */ 6558 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6559 chp->ch_drive[0].UDMA_mode > 2) || 6560 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6561 chp->ch_drive[1].UDMA_mode > 2)) 6562 scr |= PDC262_U66_EN(channel); 6563 else 6564 scr &= ~PDC262_U66_EN(channel); 6565 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6566 PDC262_U66, scr); 6567 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 6568 sc->sc_wdcdev.sc_dev.dv_xname, channel, 6569 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6570 PDC262_ATAPI(channel))), DEBUG_PROBE); 6571 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 6572 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 6573 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6574 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6575 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 6576 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6577 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6578 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 6579 atapi = 0; 6580 else 6581 atapi = PDC262_ATAPI_UDMA; 6582 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6583 PDC262_ATAPI(channel), atapi); 6584 } 6585 } 6586 for (drive = 0; drive < 2; drive++) { 6587 drvp = &chp->ch_drive[drive]; 6588 /* If no drive, skip */ 6589 if ((drvp->drive_flags & 
DRIVE) == 0)
6590 continue;
6591 mode = 0;
6592 if (drvp->drive_flags & DRIVE_UDMA) {
6593 /* use Ultra/DMA */
6594 drvp->drive_flags &= ~DRIVE_DMA;
6595 mode = PDC2xx_TIM_SET_MB(mode,
6596 pdc2xx_udma_mb[drvp->UDMA_mode]);
6597 mode = PDC2xx_TIM_SET_MC(mode,
6598 pdc2xx_udma_mc[drvp->UDMA_mode]);
6599 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
6600 } else if (drvp->drive_flags & DRIVE_DMA) {
6601 mode = PDC2xx_TIM_SET_MB(mode,
6602 pdc2xx_dma_mb[drvp->DMA_mode]);
6603 mode = PDC2xx_TIM_SET_MC(mode,
6604 pdc2xx_dma_mc[drvp->DMA_mode]);
6605 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
6606 } else {
6607 mode = PDC2xx_TIM_SET_MB(mode,
6608 pdc2xx_dma_mb[0]);
6609 mode = PDC2xx_TIM_SET_MC(mode,
6610 pdc2xx_dma_mc[0]);
6611 }
6612 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
6613 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
6614 if (drvp->drive_flags & DRIVE_ATA)
6615 mode |= PDC2xx_TIM_PRE;
6616 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
6617 if (drvp->PIO_mode >= 3) {
6618 mode |= PDC2xx_TIM_IORDY;
6619 if (drive == 0)
6620 mode |= PDC2xx_TIM_IORDYp;
6621 }
6622 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
6623 "timings 0x%x\n",
6624 sc->sc_wdcdev.sc_dev.dv_xname,
6625 chp->channel, drive, mode), DEBUG_PROBE);
6626 pci_conf_write(sc->sc_pc, sc->sc_tag,
6627 PDC2xx_TIM(chp->channel, drive), mode);
6628 }
6629 if (idedma_ctl != 0) {
6630 /* Add software bits in status register */
6631 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
6632 IDEDMA_CTL(channel), idedma_ctl);
6633 }
6634 pciide_print_modes(cp);
6635 }
6636
6637 void
6638 pdc20268_setup_channel(struct channel_softc *chp)
6639 {
6640 struct ata_drive_datas *drvp;
6641 int drive, cable;
6642 u_int32_t idedma_ctl;
6643 struct pciide_channel *cp = (struct pciide_channel *)chp;
6644 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
6645 int channel = chp->channel;
6646
6647 /* check for 80-pin cable */
6648 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE;
6649
6650 /* setup DMA if needed */
6651 pciide_channel_dma_setup(cp);
6652
6653 idedma_ctl = 0;
6654
6655 for (drive = 0; drive < 2; drive++) {
6656 drvp = &chp->ch_drive[drive];
6657 /* If no drive, skip */
6658 if ((drvp->drive_flags & DRIVE) == 0)
6659 continue;
6660 if (drvp->drive_flags & DRIVE_UDMA) {
6661 /* use Ultra/DMA */
6662 drvp->drive_flags &= ~DRIVE_DMA;
6663 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
6664 if (cable && drvp->UDMA_mode > 2) {
6665 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire "
6666 "cable not detected\n", drvp->drive_name,
6667 sc->sc_wdcdev.sc_dev.dv_xname,
6668 channel, drive), DEBUG_PROBE);
6669 drvp->UDMA_mode = 2;
6670 }
6671 } else if (drvp->drive_flags & DRIVE_DMA) {
6672 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
6673 }
6674 }
6675 /* nothing to do to set up modes; the controller snoops the SET_FEATURES command */
6676 if (idedma_ctl != 0) {
6677 /* Add software bits in status register */
6678 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
6679 IDEDMA_CTL(channel), idedma_ctl);
6680 }
6681 pciide_print_modes(cp);
6682 }
6683
6684 int
6685 pdc202xx_pci_intr(void *arg)
6686 {
6687 struct pciide_softc *sc = arg;
6688 struct pciide_channel *cp;
6689 struct channel_softc *wdc_cp;
6690 int i, rv, crv;
6691 u_int32_t scr;
6692
6693 rv = 0;
6694 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
6695 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
6696 cp = &sc->pciide_channels[i];
6697 wdc_cp = &cp->wdc_channel;
6698 /* If a compat channel, skip.
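* Compat channels get their interrupts through the legacy vectors
* established by pciide_map_compat_intr(), not through this shared
* native-PCI handler.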
*/ 6699 if (cp->compat) 6700 continue; 6701 if (scr & PDC2xx_SCR_INT(i)) { 6702 crv = wdcintr(wdc_cp); 6703 if (crv == 0) 6704 printf("%s:%d: bogus intr (reg 0x%x)\n", 6705 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 6706 else 6707 rv = 1; 6708 } 6709 } 6710 return (rv); 6711 } 6712 6713 int 6714 pdc20265_pci_intr(void *arg) 6715 { 6716 struct pciide_softc *sc = arg; 6717 struct pciide_channel *cp; 6718 struct channel_softc *wdc_cp; 6719 int i, rv, crv; 6720 u_int32_t dmastat; 6721 6722 rv = 0; 6723 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6724 cp = &sc->pciide_channels[i]; 6725 wdc_cp = &cp->wdc_channel; 6726 /* If a compat channel skip. */ 6727 if (cp->compat) 6728 continue; 6729 6730 /* 6731 * In case of shared IRQ check that the interrupt 6732 * was actually generated by this channel. 6733 * Only check the channel that is enabled. 6734 */ 6735 if (cp->hw_ok && PDC_IS_268(sc)) { 6736 if ((pdc268_config_read(wdc_cp, 6737 0x0b) & PDC268_INTR) == 0) 6738 continue; 6739 } 6740 6741 /* 6742 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 6743 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 6744 * So use it instead (requires 2 reg reads instead of 1, 6745 * but we can't do it another way). 6746 */ 6747 dmastat = bus_space_read_1(sc->sc_dma_iot, 6748 sc->sc_dma_ioh, IDEDMA_CTL(i)); 6749 if ((dmastat & IDEDMA_CTL_INTR) == 0) 6750 continue; 6751 6752 crv = wdcintr(wdc_cp); 6753 if (crv == 0) 6754 printf("%s:%d: bogus intr\n", 6755 sc->sc_wdcdev.sc_dev.dv_xname, i); 6756 else 6757 rv = 1; 6758 } 6759 return (rv); 6760 } 6761 6762 void 6763 pdc20262_dma_start(void *v, int channel, int drive) 6764 { 6765 struct pciide_softc *sc = v; 6766 struct pciide_dma_maps *dma_maps = 6767 &sc->pciide_channels[channel].dma_maps[drive]; 6768 u_int8_t clock; 6769 u_int32_t count; 6770 6771 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6772 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6773 PDC262_U66); 6774 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6775 PDC262_U66, clock | PDC262_U66_EN(channel)); 6776 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 6777 count |= dma_maps->dma_flags & WDC_DMA_READ ? 
6778 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 6779 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6780 PDC262_ATAPI(channel), count); 6781 } 6782 6783 pciide_dma_start(v, channel, drive); 6784 } 6785 6786 int 6787 pdc20262_dma_finish(void *v, int channel, int drive, int force) 6788 { 6789 struct pciide_softc *sc = v; 6790 struct pciide_dma_maps *dma_maps = 6791 &sc->pciide_channels[channel].dma_maps[drive]; 6792 u_int8_t clock; 6793 6794 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6795 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6796 PDC262_U66); 6797 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6798 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 6799 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6800 PDC262_ATAPI(channel), 0); 6801 } 6802 6803 return (pciide_dma_finish(v, channel, drive, force)); 6804 } 6805 6806 void 6807 pdcsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6808 { 6809 struct pciide_channel *cp; 6810 struct channel_softc *wdc_cp; 6811 struct pciide_pdcsata *ps; 6812 int channel, i; 6813 bus_size_t dmasize; 6814 pci_intr_handle_t intrhandle; 6815 const char *intrstr; 6816 6817 /* Allocate memory for private data */ 6818 sc->sc_cookielen = sizeof(*ps); 6819 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO); 6820 ps = sc->sc_cookie; 6821 6822 /* 6823 * Promise SATA controllers have 3 or 4 channels, 6824 * the usual IDE registers are mapped in I/O space, with offsets. 6825 */ 6826 if (pci_intr_map(pa, &intrhandle) != 0) { 6827 printf(": couldn't map interrupt\n"); 6828 return; 6829 } 6830 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 6831 6832 switch (sc->sc_pp->ide_product) { 6833 case PCI_PRODUCT_PROMISE_PDC20318: 6834 case PCI_PRODUCT_PROMISE_PDC20319: 6835 case PCI_PRODUCT_PROMISE_PDC20371: 6836 case PCI_PRODUCT_PROMISE_PDC20375: 6837 case PCI_PRODUCT_PROMISE_PDC20376: 6838 case PCI_PRODUCT_PROMISE_PDC20377: 6839 case PCI_PRODUCT_PROMISE_PDC20378: 6840 case PCI_PRODUCT_PROMISE_PDC20379: 6841 default: 6842 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6843 intrhandle, IPL_BIO, pdc203xx_pci_intr, sc, 6844 sc->sc_wdcdev.sc_dev.dv_xname); 6845 break; 6846 6847 case PCI_PRODUCT_PROMISE_PDC40518: 6848 case PCI_PRODUCT_PROMISE_PDC40519: 6849 case PCI_PRODUCT_PROMISE_PDC40718: 6850 case PCI_PRODUCT_PROMISE_PDC40719: 6851 case PCI_PRODUCT_PROMISE_PDC40779: 6852 case PCI_PRODUCT_PROMISE_PDC20571: 6853 case PCI_PRODUCT_PROMISE_PDC20575: 6854 case PCI_PRODUCT_PROMISE_PDC20579: 6855 case PCI_PRODUCT_PROMISE_PDC20771: 6856 case PCI_PRODUCT_PROMISE_PDC20775: 6857 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6858 intrhandle, IPL_BIO, pdc205xx_pci_intr, sc, 6859 sc->sc_wdcdev.sc_dev.dv_xname); 6860 break; 6861 } 6862 6863 if (sc->sc_pci_ih == NULL) { 6864 printf(": couldn't establish native-PCI interrupt"); 6865 if (intrstr != NULL) 6866 printf(" at %s", intrstr); 6867 printf("\n"); 6868 return; 6869 } 6870 6871 sc->sc_dma_ok = (pci_mapreg_map(pa, PCIIDE_REG_BUS_MASTER_DMA, 6872 PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_dma_iot, 6873 &sc->sc_dma_ioh, NULL, &dmasize, 0) == 0); 6874 if (!sc->sc_dma_ok) { 6875 printf(": couldn't map bus-master DMA registers\n"); 6876 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6877 return; 6878 } 6879 6880 sc->sc_dmat = pa->pa_dmat; 6881 6882 if (pci_mapreg_map(pa, PDC203xx_BAR_IDEREGS, 6883 PCI_MAPREG_MEM_TYPE_32BIT, 0, &ps->ba5_st, 6884 &ps->ba5_sh, NULL, NULL, 0) != 0) { 6885 printf(": couldn't map IDE registers\n"); 6886 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, dmasize); 
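/* Also release the interrupt handler established above before giving up. */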
6887 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6888 return; 6889 } 6890 6891 printf(": DMA\n"); 6892 6893 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 6894 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6895 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6896 sc->sc_wdcdev.irqack = pdc203xx_irqack; 6897 sc->sc_wdcdev.PIO_cap = 4; 6898 sc->sc_wdcdev.DMA_cap = 2; 6899 sc->sc_wdcdev.UDMA_cap = 6; 6900 sc->sc_wdcdev.set_modes = pdc203xx_setup_channel; 6901 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6902 6903 switch (sc->sc_pp->ide_product) { 6904 case PCI_PRODUCT_PROMISE_PDC20318: 6905 case PCI_PRODUCT_PROMISE_PDC20319: 6906 case PCI_PRODUCT_PROMISE_PDC20371: 6907 case PCI_PRODUCT_PROMISE_PDC20375: 6908 case PCI_PRODUCT_PROMISE_PDC20376: 6909 case PCI_PRODUCT_PROMISE_PDC20377: 6910 case PCI_PRODUCT_PROMISE_PDC20378: 6911 case PCI_PRODUCT_PROMISE_PDC20379: 6912 default: 6913 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x06c, 0x00ff0033); 6914 sc->sc_wdcdev.nchannels = 6915 (bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x48) & 0x02) ? 6916 PDC203xx_NCHANNELS : 3; 6917 break; 6918 6919 case PCI_PRODUCT_PROMISE_PDC40518: 6920 case PCI_PRODUCT_PROMISE_PDC40519: 6921 case PCI_PRODUCT_PROMISE_PDC40718: 6922 case PCI_PRODUCT_PROMISE_PDC40719: 6923 case PCI_PRODUCT_PROMISE_PDC40779: 6924 case PCI_PRODUCT_PROMISE_PDC20571: 6925 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6926 sc->sc_wdcdev.nchannels = PDC40718_NCHANNELS; 6927 6928 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6929 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6930 6931 break; 6932 case PCI_PRODUCT_PROMISE_PDC20575: 6933 case PCI_PRODUCT_PROMISE_PDC20579: 6934 case PCI_PRODUCT_PROMISE_PDC20771: 6935 case PCI_PRODUCT_PROMISE_PDC20775: 6936 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6937 sc->sc_wdcdev.nchannels = PDC20575_NCHANNELS; 6938 6939 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6940 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6941 6942 break; 6943 } 6944 6945 sc->sc_wdcdev.dma_arg = sc; 6946 sc->sc_wdcdev.dma_init = pciide_dma_init; 6947 sc->sc_wdcdev.dma_start = pdc203xx_dma_start; 6948 sc->sc_wdcdev.dma_finish = pdc203xx_dma_finish; 6949 6950 for (channel = 0; channel < sc->sc_wdcdev.nchannels; 6951 channel++) { 6952 cp = &sc->pciide_channels[channel]; 6953 sc->wdc_chanarray[channel] = &cp->wdc_channel; 6954 6955 cp->ih = sc->sc_pci_ih; 6956 cp->name = NULL; 6957 cp->wdc_channel.channel = channel; 6958 cp->wdc_channel.wdc = &sc->sc_wdcdev; 6959 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 6960 if (cp->wdc_channel.ch_queue == NULL) { 6961 printf("%s: channel %d: " 6962 "cannot allocate channel queue\n", 6963 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6964 continue; 6965 } 6966 wdc_cp = &cp->wdc_channel; 6967 6968 ps->regs[channel].ctl_iot = ps->ba5_st; 6969 ps->regs[channel].cmd_iot = ps->ba5_st; 6970 6971 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6972 0x0238 + (channel << 7), 1, 6973 &ps->regs[channel].ctl_ioh) != 0) { 6974 printf("%s: couldn't map channel %d ctl regs\n", 6975 sc->sc_wdcdev.sc_dev.dv_xname, 6976 channel); 6977 continue; 6978 } 6979 for (i = 0; i < WDC_NREG; i++) { 6980 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6981 0x0200 + (i << 2) + (channel << 7), i == 0 ? 
4 : 1,
6982 &ps->regs[channel].cmd_iohs[i]) != 0) {
6983 printf("%s: couldn't map channel %d cmd "
6984 "regs\n",
6985 sc->sc_wdcdev.sc_dev.dv_xname,
6986 channel);
6987 goto loop_end;
6988 }
6989 }
6990 ps->regs[channel].cmd_iohs[wdr_status & _WDC_REGMASK] =
6991 ps->regs[channel].cmd_iohs[wdr_command & _WDC_REGMASK];
6992 ps->regs[channel].cmd_iohs[wdr_features & _WDC_REGMASK] =
6993 ps->regs[channel].cmd_iohs[wdr_error & _WDC_REGMASK];
6994 wdc_cp->data32iot = wdc_cp->cmd_iot =
6995 ps->regs[channel].cmd_iot;
6996 wdc_cp->data32ioh = wdc_cp->cmd_ioh =
6997 ps->regs[channel].cmd_iohs[0];
6998 wdc_cp->_vtbl = &wdc_pdc203xx_vtbl;
6999
7000 /*
7001 * Subregion the busmaster registers. They're spread all over
7002 * the controller's register space :(. They are also 4 bytes
7003 * wide, with some specific extensions in the extra bits.
7004 * It also seems that the IDEDMA_CTL register isn't available.
7005 */
7006 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh,
7007 0x260 + (channel << 7), 1,
7008 &ps->regs[channel].dma_iohs[IDEDMA_CMD(0)]) != 0) {
7009 printf("%s channel %d: can't subregion DMA "
7010 "registers\n",
7011 sc->sc_wdcdev.sc_dev.dv_xname, channel);
7012 continue;
7013 }
7014 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh,
7015 0x244 + (channel << 7), 4,
7016 &ps->regs[channel].dma_iohs[IDEDMA_TBL(0)]) != 0) {
7017 printf("%s channel %d: can't subregion DMA "
7018 "registers\n",
7019 sc->sc_wdcdev.sc_dev.dv_xname, channel);
7020 continue;
7021 }
7022
7023 wdcattach(wdc_cp);
7024 bus_space_write_4(sc->sc_dma_iot,
7025 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0,
7026 (bus_space_read_4(sc->sc_dma_iot,
7027 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)],
7028 0) & ~0x00003f9f) | (channel + 1));
7029 bus_space_write_4(ps->ba5_st, ps->ba5_sh,
7030 (channel + 1) << 2, 0x00000001);
7031
7032 pdc203xx_setup_channel(&cp->wdc_channel);
7033
7034 loop_end: ;
7035 }
7036
7037 printf("%s: using %s for native-PCI interrupt\n",
7038 sc->sc_wdcdev.sc_dev.dv_xname,
7039 intrstr ?
intrstr : "unknown interrupt"); 7040 } 7041 7042 void 7043 pdc203xx_setup_channel(struct channel_softc *chp) 7044 { 7045 struct ata_drive_datas *drvp; 7046 struct pciide_channel *cp = (struct pciide_channel *)chp; 7047 int drive, s; 7048 7049 pciide_channel_dma_setup(cp); 7050 7051 for (drive = 0; drive < 2; drive++) { 7052 drvp = &chp->ch_drive[drive]; 7053 if ((drvp->drive_flags & DRIVE) == 0) 7054 continue; 7055 if (drvp->drive_flags & DRIVE_UDMA) { 7056 s = splbio(); 7057 drvp->drive_flags &= ~DRIVE_DMA; 7058 splx(s); 7059 } 7060 } 7061 pciide_print_modes(cp); 7062 } 7063 7064 int 7065 pdc203xx_pci_intr(void *arg) 7066 { 7067 struct pciide_softc *sc = arg; 7068 struct pciide_channel *cp; 7069 struct channel_softc *wdc_cp; 7070 struct pciide_pdcsata *ps = sc->sc_cookie; 7071 int i, rv, crv; 7072 u_int32_t scr; 7073 7074 rv = 0; 7075 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x00040); 7076 7077 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7078 cp = &sc->pciide_channels[i]; 7079 wdc_cp = &cp->wdc_channel; 7080 if (scr & (1 << (i + 1))) { 7081 crv = wdcintr(wdc_cp); 7082 if (crv == 0) { 7083 printf("%s:%d: bogus intr (reg 0x%x)\n", 7084 sc->sc_wdcdev.sc_dev.dv_xname, 7085 i, scr); 7086 } else 7087 rv = 1; 7088 } 7089 } 7090 7091 return (rv); 7092 } 7093 7094 int 7095 pdc205xx_pci_intr(void *arg) 7096 { 7097 struct pciide_softc *sc = arg; 7098 struct pciide_channel *cp; 7099 struct channel_softc *wdc_cp; 7100 struct pciide_pdcsata *ps = sc->sc_cookie; 7101 int i, rv, crv; 7102 u_int32_t scr, status; 7103 7104 rv = 0; 7105 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x40); 7106 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x40, scr & 0x0000ffff); 7107 7108 status = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x60); 7109 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, status & 0x000000ff); 7110 7111 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7112 cp = &sc->pciide_channels[i]; 7113 wdc_cp = &cp->wdc_channel; 7114 if (scr & (1 << (i + 1))) { 7115 crv = wdcintr(wdc_cp); 7116 if (crv == 0) { 7117 printf("%s:%d: bogus intr (reg 0x%x)\n", 7118 sc->sc_wdcdev.sc_dev.dv_xname, 7119 i, scr); 7120 } else 7121 rv = 1; 7122 } 7123 } 7124 return rv; 7125 } 7126 7127 void 7128 pdc203xx_irqack(struct channel_softc *chp) 7129 { 7130 struct pciide_channel *cp = (struct pciide_channel *)chp; 7131 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7132 struct pciide_pdcsata *ps = sc->sc_cookie; 7133 int chan = chp->channel; 7134 7135 bus_space_write_4(sc->sc_dma_iot, 7136 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, 7137 (bus_space_read_4(sc->sc_dma_iot, 7138 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 7139 0) & ~0x00003f9f) | (chan + 1)); 7140 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 7141 (chan + 1) << 2, 0x00000001); 7142 } 7143 7144 void 7145 pdc203xx_dma_start(void *v, int channel, int drive) 7146 { 7147 struct pciide_softc *sc = v; 7148 struct pciide_channel *cp = &sc->pciide_channels[channel]; 7149 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 7150 struct pciide_pdcsata *ps = sc->sc_cookie; 7151 7152 /* Write table address */ 7153 bus_space_write_4(sc->sc_dma_iot, 7154 ps->regs[channel].dma_iohs[IDEDMA_TBL(0)], 0, 7155 dma_maps->dmamap_table->dm_segs[0].ds_addr); 7156 7157 /* Start DMA engine */ 7158 bus_space_write_4(sc->sc_dma_iot, 7159 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7160 (bus_space_read_4(sc->sc_dma_iot, 7161 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7162 0) & ~0xc0) | ((dma_maps->dma_flags & WDC_DMA_READ) ? 
0x80 : 0xc0)); 7163 } 7164 7165 int 7166 pdc203xx_dma_finish(void *v, int channel, int drive, int force) 7167 { 7168 struct pciide_softc *sc = v; 7169 struct pciide_channel *cp = &sc->pciide_channels[channel]; 7170 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 7171 struct pciide_pdcsata *ps = sc->sc_cookie; 7172 7173 /* Stop DMA channel */ 7174 bus_space_write_4(sc->sc_dma_iot, 7175 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7176 (bus_space_read_4(sc->sc_dma_iot, 7177 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7178 0) & ~0x80)); 7179 7180 /* Unload the map of the data buffer */ 7181 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 7182 dma_maps->dmamap_xfer->dm_mapsize, 7183 (dma_maps->dma_flags & WDC_DMA_READ) ? 7184 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 7185 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 7186 7187 return (0); 7188 } 7189 7190 u_int8_t 7191 pdc203xx_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7192 { 7193 struct pciide_channel *cp = (struct pciide_channel *)chp; 7194 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7195 struct pciide_pdcsata *ps = sc->sc_cookie; 7196 u_int8_t val; 7197 7198 if (reg & _WDC_AUX) { 7199 return (bus_space_read_1(ps->regs[chp->channel].ctl_iot, 7200 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 7201 } else { 7202 val = bus_space_read_1(ps->regs[chp->channel].cmd_iot, 7203 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0); 7204 return (val); 7205 } 7206 } 7207 7208 void 7209 pdc203xx_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7210 { 7211 struct pciide_channel *cp = (struct pciide_channel *)chp; 7212 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7213 struct pciide_pdcsata *ps = sc->sc_cookie; 7214 7215 if (reg & _WDC_AUX) 7216 bus_space_write_1(ps->regs[chp->channel].ctl_iot, 7217 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 7218 else 7219 bus_space_write_1(ps->regs[chp->channel].cmd_iot, 7220 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 7221 0, val); 7222 } 7223 7224 void 7225 pdc205xx_do_reset(struct channel_softc *chp) 7226 { 7227 struct pciide_channel *cp = (struct pciide_channel *)chp; 7228 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7229 struct pciide_pdcsata *ps = sc->sc_cookie; 7230 u_int32_t scontrol; 7231 7232 wdc_do_reset(chp); 7233 7234 /* reset SATA */ 7235 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 7236 SCONTROL_WRITE(ps, chp->channel, scontrol); 7237 delay(50*1000); 7238 7239 scontrol &= ~SControl_DET_INIT; 7240 SCONTROL_WRITE(ps, chp->channel, scontrol); 7241 delay(50*1000); 7242 } 7243 7244 void 7245 pdc205xx_drv_probe(struct channel_softc *chp) 7246 { 7247 struct pciide_channel *cp = (struct pciide_channel *)chp; 7248 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7249 struct pciide_pdcsata *ps = sc->sc_cookie; 7250 bus_space_handle_t *iohs; 7251 u_int32_t scontrol, sstatus; 7252 u_int16_t scnt, sn, cl, ch; 7253 int s; 7254 7255 SCONTROL_WRITE(ps, chp->channel, 0); 7256 delay(50*1000); 7257 7258 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 7259 SCONTROL_WRITE(ps,chp->channel,scontrol); 7260 delay(50*1000); 7261 7262 scontrol &= ~SControl_DET_INIT; 7263 SCONTROL_WRITE(ps,chp->channel,scontrol); 7264 delay(50*1000); 7265 7266 sstatus = SSTATUS_READ(ps,chp->channel); 7267 7268 switch (sstatus & SStatus_DET_mask) { 7269 case SStatus_DET_NODEV: 7270 /* No Device; be silent. 
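* SStatus indicates nothing is attached to this port, which is the
* normal case for an empty connector.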
*/ 7271 break; 7272 7273 case SStatus_DET_DEV_NE: 7274 printf("%s: port %d: device connected, but " 7275 "communication not established\n", 7276 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7277 break; 7278 7279 case SStatus_DET_OFFLINE: 7280 printf("%s: port %d: PHY offline\n", 7281 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7282 break; 7283 7284 case SStatus_DET_DEV: 7285 iohs = ps->regs[chp->channel].cmd_iohs; 7286 bus_space_write_1(chp->cmd_iot, iohs[wdr_sdh], 0, 7287 WDSD_IBM); 7288 delay(10); /* 400ns delay */ 7289 scnt = bus_space_read_2(chp->cmd_iot, iohs[wdr_seccnt], 0); 7290 sn = bus_space_read_2(chp->cmd_iot, iohs[wdr_sector], 0); 7291 cl = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_lo], 0); 7292 ch = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_hi], 0); 7293 #if 0 7294 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7295 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7296 scnt, sn, cl, ch); 7297 #endif 7298 /* 7299 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7300 * cases we get wrong values here, so ignore it. 7301 */ 7302 s = splbio(); 7303 if (cl == 0x14 && ch == 0xeb) 7304 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7305 else 7306 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7307 splx(s); 7308 #if 0 7309 printf("%s: port %d", 7310 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7311 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7312 case 1: 7313 printf(": 1.5Gb/s"); 7314 break; 7315 case 2: 7316 printf(": 3.0Gb/s"); 7317 break; 7318 } 7319 printf("\n"); 7320 #endif 7321 break; 7322 7323 default: 7324 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7325 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7326 } 7327 } 7328 7329 #ifdef notyet 7330 /* 7331 * Inline functions for accessing the timing registers of the 7332 * OPTi controller. 7333 * 7334 * These *MUST* disable interrupts as they need atomic access to 7335 * certain magic registers. Failure to adhere to this *will* 7336 * break things in subtle ways if the wdc registers are accessed 7337 * by an interrupt routine while this magic sequence is executing. 
7338 */ 7339 static __inline__ u_int8_t 7340 opti_read_config(struct channel_softc *chp, int reg) 7341 { 7342 u_int8_t rv; 7343 int s = splhigh(); 7344 7345 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7346 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7347 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7348 7349 /* Followed by an 8-bit write of 0x3 to register #2 */ 7350 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7351 7352 /* Now we can read the required register */ 7353 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 7354 7355 /* Restore the real registers */ 7356 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7357 7358 splx(s); 7359 7360 return (rv); 7361 } 7362 7363 static __inline__ void 7364 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 7365 { 7366 int s = splhigh(); 7367 7368 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7369 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7370 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7371 7372 /* Followed by an 8-bit write of 0x3 to register #2 */ 7373 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7374 7375 /* Now we can write the required register */ 7376 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 7377 7378 /* Restore the real registers */ 7379 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7380 7381 splx(s); 7382 } 7383 7384 void 7385 opti_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7386 { 7387 struct pciide_channel *cp; 7388 bus_size_t cmdsize, ctlsize; 7389 pcireg_t interface; 7390 u_int8_t init_ctrl; 7391 int channel; 7392 7393 printf(": DMA"); 7394 /* 7395 * XXXSCW: 7396 * There seem to be a couple of buggy revisions/implementations 7397 * of the OPTi pciide chipset. This kludge seems to fix one of 7398 * the reported problems (NetBSD PR/11644) but still fails for the 7399 * other (NetBSD PR/13151), although the latter may be due to other 7400 * issues too... 
7401 */
7402 if (sc->sc_rev <= 0x12) {
7403 printf(" (disabled)");
7404 sc->sc_dma_ok = 0;
7405 sc->sc_wdcdev.cap = 0;
7406 } else {
7407 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
7408 pciide_mapreg_dma(sc, pa);
7409 }
7410
7411 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
7412 sc->sc_wdcdev.PIO_cap = 4;
7413 if (sc->sc_dma_ok) {
7414 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
7415 sc->sc_wdcdev.irqack = pciide_irqack;
7416 sc->sc_wdcdev.DMA_cap = 2;
7417 }
7418 sc->sc_wdcdev.set_modes = opti_setup_channel;
7419
7420 sc->sc_wdcdev.channels = sc->wdc_chanarray;
7421 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
7422
7423 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
7424 OPTI_REG_INIT_CONTROL);
7425
7426 interface = PCI_INTERFACE(pa->pa_class);
7427
7428 pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
7429
7430 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
7431 cp = &sc->pciide_channels[channel];
7432 if (pciide_chansetup(sc, channel, interface) == 0)
7433 continue;
7434 if (channel == 1 &&
7435 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
7436 printf("%s: %s ignored (disabled)\n",
7437 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
7438 cp->hw_ok = 0;
7439 continue;
7440 }
7441 pciide_map_compat_intr(pa, cp, channel, interface);
7442 if (cp->hw_ok == 0)
7443 continue;
7444 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
7445 pciide_pci_intr);
7446 if (cp->hw_ok == 0) {
7447 pciide_unmap_compat_intr(pa, cp, channel, interface);
7448 continue;
7449 }
7450 opti_setup_channel(&cp->wdc_channel);
7451 }
7452 }
7453
7454 void
7455 opti_setup_channel(struct channel_softc *chp)
7456 {
7457 struct ata_drive_datas *drvp;
7458 struct pciide_channel *cp = (struct pciide_channel *)chp;
7459 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
7460 int drive, spd;
7461 int mode[2];
7462 u_int8_t rv, mr;
7463
7464 /*
7465 * The `Delay' and `Address Setup Time' fields of the
7466 * Miscellaneous Register are always zero initially.
7467 */
7468 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
7469 mr &= ~(OPTI_MISC_DELAY_MASK |
7470 OPTI_MISC_ADDR_SETUP_MASK |
7471 OPTI_MISC_INDEX_MASK);
7472
7473 /* Prime the control register before setting timing values */
7474 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
7475
7476 /* Determine the clockrate of the PCIbus the chip is attached to */
7477 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
7478 spd &= OPTI_STRAP_PCI_SPEED_MASK;
7479
7480 /* setup DMA if needed */
7481 pciide_channel_dma_setup(cp);
7482
7483 for (drive = 0; drive < 2; drive++) {
7484 drvp = &chp->ch_drive[drive];
7485 /* If no drive, skip */
7486 if ((drvp->drive_flags & DRIVE) == 0) {
7487 mode[drive] = -1;
7488 continue;
7489 }
7490
7491 if ((drvp->drive_flags & DRIVE_DMA)) {
7492 /*
7493 * Timings will be used for both PIO and DMA,
7494 * so adjust DMA mode if needed
7495 */
7496 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
7497 drvp->PIO_mode = drvp->DMA_mode + 2;
7498 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
7499 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
7500 drvp->PIO_mode - 2 : 0;
7501 if (drvp->DMA_mode == 0)
7502 drvp->PIO_mode = 0;
7503
7504 mode[drive] = drvp->DMA_mode + 5;
7505 } else
7506 mode[drive] = drvp->PIO_mode;
7507
7508 if (drive && mode[0] >= 0 &&
7509 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
7510 /*
7511 * Can't have two drives using different values
7512 * for `Address Setup Time'.
7513 * Slow down the faster drive to compensate.
7514 */
7515 int d = (opti_tim_as[spd][mode[0]] >
7516 opti_tim_as[spd][mode[1]]) ? 0 : 1;
7517
7518 mode[d] = mode[1-d];
7519 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
7520 chp->ch_drive[d].DMA_mode = 0;
7521 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
7522 }
7523 }
7524
7525 for (drive = 0; drive < 2; drive++) {
7526 int m;
7527 if ((m = mode[drive]) < 0)
7528 continue;
7529
7530 /* Set the Address Setup Time and select appropriate index */
7531 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
7532 rv |= OPTI_MISC_INDEX(drive);
7533 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
7534
7535 /* Set the pulse width and recovery timing parameters */
7536 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
7537 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
7538 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
7539 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
7540
7541 /* Set the Enhanced Mode register appropriately */
7542 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
7543 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
7544 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
7545 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
7546 }
7547
7548 /* Finally, enable the timings */
7549 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
7550
7551 pciide_print_modes(cp);
7552 }
7553 #endif
7554
7555 void
7556 serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
7557 {
7558 struct pciide_channel *cp;
7559 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
7560 pcitag_t pcib_tag;
7561 int channel;
7562 bus_size_t cmdsize, ctlsize;
7563
7564 printf(": DMA");
7565 pciide_mapreg_dma(sc, pa);
7566 printf("\n");
7567 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
7568 WDC_CAPABILITY_MODE;
7569
7570 if (sc->sc_dma_ok) {
7571 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
7572 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
7573 sc->sc_wdcdev.irqack = pciide_irqack;
7574 }
7575 sc->sc_wdcdev.PIO_cap = 4;
7576 sc->sc_wdcdev.DMA_cap = 2;
7577 switch (sc->sc_pp->ide_product) {
7578 case PCI_PRODUCT_RCC_OSB4_IDE:
7579 sc->sc_wdcdev.UDMA_cap = 2;
7580 break;
7581 case PCI_PRODUCT_RCC_CSB5_IDE:
7582 if (sc->sc_rev < 0x92)
7583 sc->sc_wdcdev.UDMA_cap = 4;
7584 else
7585 sc->sc_wdcdev.UDMA_cap = 5;
7586 break;
7587 case PCI_PRODUCT_RCC_CSB6_IDE:
7588 sc->sc_wdcdev.UDMA_cap = 4;
7589 break;
7590 case PCI_PRODUCT_RCC_CSB6_RAID_IDE:
7591 case PCI_PRODUCT_RCC_HT_1000_IDE:
7592 sc->sc_wdcdev.UDMA_cap = 5;
7593 break;
7594 }
7595
7596 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
7597 sc->sc_wdcdev.channels = sc->wdc_chanarray;
7598 sc->sc_wdcdev.nchannels =
7599 (sc->sc_pp->ide_product == PCI_PRODUCT_RCC_CSB6_IDE ?
1 : 2); 7600 7601 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7602 cp = &sc->pciide_channels[channel]; 7603 if (pciide_chansetup(sc, channel, interface) == 0) 7604 continue; 7605 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7606 serverworks_pci_intr); 7607 if (cp->hw_ok == 0) 7608 return; 7609 pciide_map_compat_intr(pa, cp, channel, interface); 7610 if (cp->hw_ok == 0) 7611 return; 7612 serverworks_setup_channel(&cp->wdc_channel); 7613 } 7614 7615 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 7616 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 7617 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 7618 } 7619 7620 void 7621 serverworks_setup_channel(struct channel_softc *chp) 7622 { 7623 struct ata_drive_datas *drvp; 7624 struct pciide_channel *cp = (struct pciide_channel *)chp; 7625 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7626 int channel = chp->channel; 7627 int drive, unit; 7628 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 7629 u_int32_t idedma_ctl; 7630 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 7631 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 7632 7633 /* setup DMA if needed */ 7634 pciide_channel_dma_setup(cp); 7635 7636 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 7637 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 7638 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 7639 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 7640 7641 pio_time &= ~(0xffff << (16 * channel)); 7642 dma_time &= ~(0xffff << (16 * channel)); 7643 pio_mode &= ~(0xff << (8 * channel + 16)); 7644 udma_mode &= ~(0xff << (8 * channel + 16)); 7645 udma_mode &= ~(3 << (2 * channel)); 7646 7647 idedma_ctl = 0; 7648 7649 /* Per drive settings */ 7650 for (drive = 0; drive < 2; drive++) { 7651 drvp = &chp->ch_drive[drive]; 7652 /* If no drive, skip */ 7653 if ((drvp->drive_flags & DRIVE) == 0) 7654 continue; 7655 unit = drive + 2 * channel; 7656 /* add timing values, setup DMA if needed */ 7657 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 7658 pio_mode |= drvp->PIO_mode << (4 * unit + 16); 7659 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7660 (drvp->drive_flags & DRIVE_UDMA)) { 7661 /* use Ultra/DMA, check for 80-pin cable */ 7662 if (sc->sc_rev <= 0x92 && drvp->UDMA_mode > 2 && 7663 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 7664 PCI_SUBSYS_ID_REG)) & 7665 (1 << (14 + channel))) == 0) { 7666 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 7667 "cable not detected\n", drvp->drive_name, 7668 sc->sc_wdcdev.sc_dev.dv_xname, 7669 channel, drive), DEBUG_PROBE); 7670 drvp->UDMA_mode = 2; 7671 } 7672 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7673 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 7674 udma_mode |= 1 << unit; 7675 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7676 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7677 (drvp->drive_flags & DRIVE_DMA)) { 7678 /* use Multiword DMA */ 7679 drvp->drive_flags &= ~DRIVE_UDMA; 7680 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7681 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7682 } else { 7683 /* PIO only */ 7684 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7685 } 7686 } 7687 7688 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 7689 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 7690 if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE) 7691 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 7692 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 
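/*
 * Register layout assumed by the shifts above (inferred from this code,
 * not from a datasheet): registers 0x40/0x44 carry one timing byte per
 * drive, with the two drives of a channel stored in reverse order
 * (hence the (unit ^ 1) shift), while 0x48/0x54 carry one mode nibble
 * per drive starting at bit 16 plus a per-drive UDMA enable bit in the
 * low byte of 0x54.  For example, channel 1 / drive 0 gives unit = 2,
 * so its PIO timing byte lands in bits 31:24 of register 0x40 and its
 * PIO mode nibble in bits 27:24 of register 0x48.  The OSB4 is the
 * exception: its 0x48 register is not written (see the check above).
 */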
7693 7694 if (idedma_ctl != 0) { 7695 /* Add software bits in status register */ 7696 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7697 IDEDMA_CTL(channel), idedma_ctl); 7698 } 7699 pciide_print_modes(cp); 7700 } 7701 7702 int 7703 serverworks_pci_intr(void *arg) 7704 { 7705 struct pciide_softc *sc = arg; 7706 struct pciide_channel *cp; 7707 struct channel_softc *wdc_cp; 7708 int rv = 0; 7709 int dmastat, i, crv; 7710 7711 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7712 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7713 IDEDMA_CTL(i)); 7714 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 7715 IDEDMA_CTL_INTR) 7716 continue; 7717 cp = &sc->pciide_channels[i]; 7718 wdc_cp = &cp->wdc_channel; 7719 crv = wdcintr(wdc_cp); 7720 if (crv == 0) { 7721 printf("%s:%d: bogus intr\n", 7722 sc->sc_wdcdev.sc_dev.dv_xname, i); 7723 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7724 IDEDMA_CTL(i), dmastat); 7725 } else 7726 rv = 1; 7727 } 7728 return (rv); 7729 } 7730 7731 void 7732 svwsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7733 { 7734 struct pciide_channel *cp; 7735 pci_intr_handle_t intrhandle; 7736 const char *intrstr; 7737 int channel; 7738 struct pciide_svwsata *ss; 7739 7740 /* Allocate memory for private data */ 7741 sc->sc_cookielen = sizeof(*ss); 7742 sc->sc_cookie = malloc(sc->sc_cookielen, M_DEVBUF, M_NOWAIT | M_ZERO); 7743 ss = sc->sc_cookie; 7744 7745 /* The 4-port version has a dummy second function. */ 7746 if (pci_conf_read(sc->sc_pc, sc->sc_tag, 7747 PCI_MAPREG_START + 0x14) == 0) { 7748 printf("\n"); 7749 return; 7750 } 7751 7752 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 7753 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 7754 &ss->ba5_st, &ss->ba5_sh, NULL, NULL, 0) != 0) { 7755 printf(": unable to map BA5 register space\n"); 7756 return; 7757 } 7758 7759 printf(": DMA"); 7760 svwsata_mapreg_dma(sc, pa); 7761 printf("\n"); 7762 7763 if (sc->sc_dma_ok) { 7764 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 7765 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7766 sc->sc_wdcdev.irqack = pciide_irqack; 7767 } 7768 sc->sc_wdcdev.PIO_cap = 4; 7769 sc->sc_wdcdev.DMA_cap = 2; 7770 sc->sc_wdcdev.UDMA_cap = 6; 7771 7772 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7773 sc->sc_wdcdev.nchannels = 4; 7774 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7775 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7776 sc->sc_wdcdev.set_modes = sata_setup_channel; 7777 7778 /* We can use SControl and SStatus to probe for drives. */ 7779 sc->sc_wdcdev.drv_probe = svwsata_drv_probe; 7780 7781 /* Map and establish the interrupt handler. */ 7782 if(pci_intr_map(pa, &intrhandle) != 0) { 7783 printf("%s: couldn't map native-PCI interrupt\n", 7784 sc->sc_wdcdev.sc_dev.dv_xname); 7785 return; 7786 } 7787 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 7788 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 7789 pciide_pci_intr, sc, sc->sc_wdcdev.sc_dev.dv_xname); 7790 if (sc->sc_pci_ih != NULL) { 7791 printf("%s: using %s for native-PCI interrupt\n", 7792 sc->sc_wdcdev.sc_dev.dv_xname, 7793 intrstr ? 
intrstr : "unknown interrupt"); 7794 } else { 7795 printf("%s: couldn't establish native-PCI interrupt", 7796 sc->sc_wdcdev.sc_dev.dv_xname); 7797 if (intrstr != NULL) 7798 printf(" at %s", intrstr); 7799 printf("\n"); 7800 return; 7801 } 7802 7803 switch (sc->sc_pp->ide_product) { 7804 case PCI_PRODUCT_RCC_K2_SATA: 7805 bus_space_write_4(ss->ba5_st, ss->ba5_sh, SVWSATA_SICR1, 7806 bus_space_read_4(ss->ba5_st, ss->ba5_sh, SVWSATA_SICR1) 7807 & ~0x00040000); 7808 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7809 SVWSATA_SIM, 0); 7810 break; 7811 } 7812 7813 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7814 cp = &sc->pciide_channels[channel]; 7815 if (pciide_chansetup(sc, channel, 0) == 0) 7816 continue; 7817 svwsata_mapchan(cp); 7818 sata_setup_channel(&cp->wdc_channel); 7819 } 7820 } 7821 7822 void 7823 svwsata_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 7824 { 7825 struct pciide_svwsata *ss = sc->sc_cookie; 7826 7827 sc->sc_wdcdev.dma_arg = sc; 7828 sc->sc_wdcdev.dma_init = pciide_dma_init; 7829 sc->sc_wdcdev.dma_start = pciide_dma_start; 7830 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 7831 7832 /* XXX */ 7833 sc->sc_dma_iot = ss->ba5_st; 7834 sc->sc_dma_ioh = ss->ba5_sh; 7835 7836 sc->sc_dmacmd_read = svwsata_dmacmd_read; 7837 sc->sc_dmacmd_write = svwsata_dmacmd_write; 7838 sc->sc_dmactl_read = svwsata_dmactl_read; 7839 sc->sc_dmactl_write = svwsata_dmactl_write; 7840 sc->sc_dmatbl_write = svwsata_dmatbl_write; 7841 7842 /* DMA registers all set up! */ 7843 sc->sc_dmat = pa->pa_dmat; 7844 sc->sc_dma_ok = 1; 7845 } 7846 7847 u_int8_t 7848 svwsata_dmacmd_read(struct pciide_softc *sc, int chan) 7849 { 7850 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7851 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0))); 7852 } 7853 7854 void 7855 svwsata_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 7856 { 7857 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7858 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0), val); 7859 } 7860 7861 u_int8_t 7862 svwsata_dmactl_read(struct pciide_softc *sc, int chan) 7863 { 7864 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7865 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0))); 7866 } 7867 7868 void 7869 svwsata_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 7870 { 7871 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7872 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0), val); 7873 } 7874 7875 void 7876 svwsata_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 7877 { 7878 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 7879 (chan << 8) + SVWSATA_DMA + IDEDMA_TBL(0), val); 7880 } 7881 7882 void 7883 svwsata_mapchan(struct pciide_channel *cp) 7884 { 7885 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7886 struct channel_softc *wdc_cp = &cp->wdc_channel; 7887 struct pciide_svwsata *ss = sc->sc_cookie; 7888 7889 cp->compat = 0; 7890 cp->ih = sc->sc_pci_ih; 7891 7892 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7893 (wdc_cp->channel << 8) + SVWSATA_TF0, 7894 SVWSATA_TF8 - SVWSATA_TF0, &wdc_cp->cmd_ioh) != 0) { 7895 printf("%s: couldn't map %s cmd regs\n", 7896 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7897 return; 7898 } 7899 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7900 (wdc_cp->channel << 8) + SVWSATA_TF8, 4, 7901 &wdc_cp->ctl_ioh) != 0) { 7902 printf("%s: couldn't map %s ctl regs\n", 7903 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7904 return; 7905 } 7906 wdc_cp->cmd_iot = wdc_cp->ctl_iot = ss->ba5_st; 7907 wdc_cp->_vtbl = &wdc_svwsata_vtbl; 7908 
wdc_cp->ch_flags |= WDCF_DMA_BEFORE_CMD; 7909 wdcattach(wdc_cp); 7910 } 7911 7912 void 7913 svwsata_drv_probe(struct channel_softc *chp) 7914 { 7915 struct pciide_channel *cp = (struct pciide_channel *)chp; 7916 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7917 struct pciide_svwsata *ss = sc->sc_cookie; 7918 int channel = chp->channel; 7919 uint32_t scontrol, sstatus; 7920 uint8_t scnt, sn, cl, ch; 7921 int s; 7922 7923 /* 7924 * Request communication initialization sequence, any speed. 7925 * Performing this is the equivalent of an ATA Reset. 7926 */ 7927 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 7928 7929 /* 7930 * XXX We don't yet support SATA power management; disable all 7931 * power management state transitions. 7932 */ 7933 scontrol |= SControl_IPM_NONE; 7934 7935 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7936 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7937 delay(50 * 1000); 7938 scontrol &= ~SControl_DET_INIT; 7939 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7940 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7941 delay(100 * 1000); 7942 7943 sstatus = bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7944 (channel << 8) + SVWSATA_SSTATUS); 7945 #if 0 7946 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 7947 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 7948 bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7949 (channel << 8) + SVWSATA_SSTATUS)); 7950 #endif 7951 switch (sstatus & SStatus_DET_mask) { 7952 case SStatus_DET_NODEV: 7953 /* No device; be silent. */ 7954 break; 7955 7956 case SStatus_DET_DEV_NE: 7957 printf("%s: port %d: device connected, but " 7958 "communication not established\n", 7959 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7960 break; 7961 7962 case SStatus_DET_OFFLINE: 7963 printf("%s: port %d: PHY offline\n", 7964 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7965 break; 7966 7967 case SStatus_DET_DEV: 7968 /* 7969 * XXX ATAPI detection doesn't currently work. Don't 7970 * XXX know why. But, it's not like the standard method 7971 * XXX can detect an ATAPI device connected via a SATA/PATA 7972 * XXX bridge, so at least this is no worse. --thorpej 7973 */ 7974 if (chp->_vtbl != NULL) 7975 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 7976 else 7977 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 7978 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 7979 delay(10); /* 400ns delay */ 7980 /* Save register contents. */ 7981 if (chp->_vtbl != NULL) { 7982 scnt = CHP_READ_REG(chp, wdr_seccnt); 7983 sn = CHP_READ_REG(chp, wdr_sector); 7984 cl = CHP_READ_REG(chp, wdr_cyl_lo); 7985 ch = CHP_READ_REG(chp, wdr_cyl_hi); 7986 } else { 7987 scnt = bus_space_read_1(chp->cmd_iot, 7988 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 7989 sn = bus_space_read_1(chp->cmd_iot, 7990 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 7991 cl = bus_space_read_1(chp->cmd_iot, 7992 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 7993 ch = bus_space_read_1(chp->cmd_iot, 7994 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 7995 } 7996 #if 0 7997 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7998 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7999 scnt, sn, cl, ch); 8000 #endif 8001 /* 8002 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 8003 * cases we get wrong values here, so ignore it. 
8004 */ 8005 s = splbio(); 8006 if (cl == 0x14 && ch == 0xeb) 8007 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 8008 else 8009 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 8010 splx(s); 8011 8012 printf("%s: port %d", 8013 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 8014 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 8015 case 1: 8016 printf(": 1.5Gb/s"); 8017 break; 8018 case 2: 8019 printf(": 3.0Gb/s"); 8020 break; 8021 } 8022 printf("\n"); 8023 break; 8024 8025 default: 8026 printf("%s: port %d: unknown SStatus: 0x%08x\n", 8027 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 8028 } 8029 } 8030 8031 u_int8_t 8032 svwsata_read_reg(struct channel_softc *chp, enum wdc_regs reg) 8033 { 8034 if (reg & _WDC_AUX) { 8035 return (bus_space_read_4(chp->ctl_iot, chp->ctl_ioh, 8036 (reg & _WDC_REGMASK) << 2)); 8037 } else { 8038 return (bus_space_read_4(chp->cmd_iot, chp->cmd_ioh, 8039 (reg & _WDC_REGMASK) << 2)); 8040 } 8041 } 8042 8043 void 8044 svwsata_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 8045 { 8046 if (reg & _WDC_AUX) { 8047 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 8048 (reg & _WDC_REGMASK) << 2, val); 8049 } else { 8050 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 8051 (reg & _WDC_REGMASK) << 2, val); 8052 } 8053 } 8054 8055 void 8056 svwsata_lba48_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int16_t val) 8057 { 8058 if (reg & _WDC_AUX) { 8059 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 8060 (reg & _WDC_REGMASK) << 2, val); 8061 } else { 8062 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 8063 (reg & _WDC_REGMASK) << 2, val); 8064 } 8065 } 8066 8067 #define ACARD_IS_850(sc) \ 8068 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 8069 8070 void 8071 acard_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8072 { 8073 struct pciide_channel *cp; 8074 int i; 8075 pcireg_t interface; 8076 bus_size_t cmdsize, ctlsize; 8077 8078 /* 8079 * when the chip is in native mode it identifies itself as a 8080 * 'misc mass storage'. Fake interface in this case. 
8081 */ 8082 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 8083 interface = PCI_INTERFACE(pa->pa_class); 8084 } else { 8085 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8086 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8087 } 8088 8089 printf(": DMA"); 8090 pciide_mapreg_dma(sc, pa); 8091 printf("\n"); 8092 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8093 WDC_CAPABILITY_MODE; 8094 8095 if (sc->sc_dma_ok) { 8096 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8097 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8098 sc->sc_wdcdev.irqack = pciide_irqack; 8099 } 8100 sc->sc_wdcdev.PIO_cap = 4; 8101 sc->sc_wdcdev.DMA_cap = 2; 8102 switch (sc->sc_pp->ide_product) { 8103 case PCI_PRODUCT_ACARD_ATP850U: 8104 sc->sc_wdcdev.UDMA_cap = 2; 8105 break; 8106 case PCI_PRODUCT_ACARD_ATP860: 8107 case PCI_PRODUCT_ACARD_ATP860A: 8108 sc->sc_wdcdev.UDMA_cap = 4; 8109 break; 8110 case PCI_PRODUCT_ACARD_ATP865A: 8111 case PCI_PRODUCT_ACARD_ATP865R: 8112 sc->sc_wdcdev.UDMA_cap = 6; 8113 break; 8114 } 8115 8116 sc->sc_wdcdev.set_modes = acard_setup_channel; 8117 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8118 sc->sc_wdcdev.nchannels = 2; 8119 8120 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 8121 cp = &sc->pciide_channels[i]; 8122 if (pciide_chansetup(sc, i, interface) == 0) 8123 continue; 8124 if (interface & PCIIDE_INTERFACE_PCI(i)) { 8125 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 8126 &ctlsize, pciide_pci_intr); 8127 } else { 8128 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 8129 &cmdsize, &ctlsize); 8130 } 8131 if (cp->hw_ok == 0) 8132 return; 8133 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 8134 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 8135 wdcattach(&cp->wdc_channel); 8136 acard_setup_channel(&cp->wdc_channel); 8137 } 8138 if (!ACARD_IS_850(sc)) { 8139 u_int32_t reg; 8140 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 8141 reg &= ~ATP860_CTRL_INT; 8142 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 8143 } 8144 } 8145 8146 void 8147 acard_setup_channel(struct channel_softc *chp) 8148 { 8149 struct ata_drive_datas *drvp; 8150 struct pciide_channel *cp = (struct pciide_channel *)chp; 8151 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8152 int channel = chp->channel; 8153 int drive; 8154 u_int32_t idetime, udma_mode; 8155 u_int32_t idedma_ctl; 8156 8157 /* setup DMA if needed */ 8158 pciide_channel_dma_setup(cp); 8159 8160 if (ACARD_IS_850(sc)) { 8161 idetime = 0; 8162 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 8163 udma_mode &= ~ATP850_UDMA_MASK(channel); 8164 } else { 8165 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 8166 idetime &= ~ATP860_SETTIME_MASK(channel); 8167 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 8168 udma_mode &= ~ATP860_UDMA_MASK(channel); 8169 } 8170 8171 idedma_ctl = 0; 8172 8173 /* Per drive settings */ 8174 for (drive = 0; drive < 2; drive++) { 8175 drvp = &chp->ch_drive[drive]; 8176 /* If no drive, skip */ 8177 if ((drvp->drive_flags & DRIVE) == 0) 8178 continue; 8179 /* add timing values, setup DMA if needed */ 8180 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 8181 (drvp->drive_flags & DRIVE_UDMA)) { 8182 /* use Ultra/DMA */ 8183 if (ACARD_IS_850(sc)) { 8184 idetime |= ATP850_SETTIME(drive, 8185 acard_act_udma[drvp->UDMA_mode], 8186 acard_rec_udma[drvp->UDMA_mode]); 8187 udma_mode |= ATP850_UDMA_MODE(channel, drive, 8188 acard_udma_conf[drvp->UDMA_mode]); 8189 } else { 8190 idetime |= 
ATP860_SETTIME(channel, drive, 8191 acard_act_udma[drvp->UDMA_mode], 8192 acard_rec_udma[drvp->UDMA_mode]); 8193 udma_mode |= ATP860_UDMA_MODE(channel, drive, 8194 acard_udma_conf[drvp->UDMA_mode]); 8195 } 8196 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8197 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 8198 (drvp->drive_flags & DRIVE_DMA)) { 8199 /* use Multiword DMA */ 8200 drvp->drive_flags &= ~DRIVE_UDMA; 8201 if (ACARD_IS_850(sc)) { 8202 idetime |= ATP850_SETTIME(drive, 8203 acard_act_dma[drvp->DMA_mode], 8204 acard_rec_dma[drvp->DMA_mode]); 8205 } else { 8206 idetime |= ATP860_SETTIME(channel, drive, 8207 acard_act_dma[drvp->DMA_mode], 8208 acard_rec_dma[drvp->DMA_mode]); 8209 } 8210 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8211 } else { 8212 /* PIO only */ 8213 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 8214 if (ACARD_IS_850(sc)) { 8215 idetime |= ATP850_SETTIME(drive, 8216 acard_act_pio[drvp->PIO_mode], 8217 acard_rec_pio[drvp->PIO_mode]); 8218 } else { 8219 idetime |= ATP860_SETTIME(channel, drive, 8220 acard_act_pio[drvp->PIO_mode], 8221 acard_rec_pio[drvp->PIO_mode]); 8222 } 8223 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 8224 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 8225 | ATP8x0_CTRL_EN(channel)); 8226 } 8227 } 8228 8229 if (idedma_ctl != 0) { 8230 /* Add software bits in status register */ 8231 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8232 IDEDMA_CTL(channel), idedma_ctl); 8233 } 8234 pciide_print_modes(cp); 8235 8236 if (ACARD_IS_850(sc)) { 8237 pci_conf_write(sc->sc_pc, sc->sc_tag, 8238 ATP850_IDETIME(channel), idetime); 8239 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 8240 } else { 8241 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 8242 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 8243 } 8244 } 8245 8246 void 8247 nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8248 { 8249 struct pciide_channel *cp; 8250 int channel; 8251 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8252 bus_size_t cmdsize, ctlsize; 8253 u_int32_t conf; 8254 8255 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8256 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8257 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8258 8259 printf(": DMA"); 8260 pciide_mapreg_dma(sc, pa); 8261 8262 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8263 WDC_CAPABILITY_MODE; 8264 if (sc->sc_dma_ok) { 8265 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8266 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8267 sc->sc_wdcdev.irqack = pciide_irqack; 8268 } 8269 sc->sc_wdcdev.PIO_cap = 4; 8270 sc->sc_wdcdev.DMA_cap = 2; 8271 switch (sc->sc_pp->ide_product) { 8272 case PCI_PRODUCT_NVIDIA_NFORCE_IDE: 8273 sc->sc_wdcdev.UDMA_cap = 5; 8274 break; 8275 default: 8276 sc->sc_wdcdev.UDMA_cap = 6; 8277 } 8278 sc->sc_wdcdev.set_modes = nforce_setup_channel; 8279 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8280 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8281 8282 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8283 8284 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8285 cp = &sc->pciide_channels[channel]; 8286 8287 if (pciide_chansetup(sc, channel, interface) == 0) 8288 continue; 8289 8290 if ((conf & NFORCE_CHAN_EN(channel)) == 0) { 8291 printf("%s: %s ignored (disabled)\n", 8292 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8293 cp->hw_ok = 0; 8294 continue; 8295 } 8296 8297 pciide_map_compat_intr(pa, cp, channel, interface); 8298 if (cp->hw_ok == 0) 8299 
continue; 8300 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8301 nforce_pci_intr); 8302 if (cp->hw_ok == 0) { 8303 pciide_unmap_compat_intr(pa, cp, channel, interface); 8304 continue; 8305 } 8306 8307 if (pciide_chan_candisable(cp)) { 8308 conf &= ~NFORCE_CHAN_EN(channel); 8309 pciide_unmap_compat_intr(pa, cp, channel, interface); 8310 continue; 8311 } 8312 8313 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8314 } 8315 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8316 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8317 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 8318 } 8319 8320 void 8321 nforce_setup_channel(struct channel_softc *chp) 8322 { 8323 struct ata_drive_datas *drvp; 8324 int drive, mode; 8325 u_int32_t idedma_ctl; 8326 struct pciide_channel *cp = (struct pciide_channel *)chp; 8327 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8328 int channel = chp->channel; 8329 u_int32_t conf, piodmatim, piotim, udmatim; 8330 8331 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8332 piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM); 8333 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM); 8334 udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM); 8335 WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, " 8336 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8337 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8338 8339 /* Setup DMA if needed */ 8340 pciide_channel_dma_setup(cp); 8341 8342 /* Clear all bits for this channel */ 8343 idedma_ctl = 0; 8344 piodmatim &= ~NFORCE_PIODMATIM_MASK(channel); 8345 udmatim &= ~NFORCE_UDMATIM_MASK(channel); 8346 8347 /* Per channel settings */ 8348 for (drive = 0; drive < 2; drive++) { 8349 drvp = &chp->ch_drive[drive]; 8350 8351 /* If no drive, skip */ 8352 if ((drvp->drive_flags & DRIVE) == 0) 8353 continue; 8354 8355 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8356 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8357 /* Setup UltraDMA mode */ 8358 drvp->drive_flags &= ~DRIVE_DMA; 8359 8360 udmatim |= NFORCE_UDMATIM_SET(channel, drive, 8361 nforce_udma[drvp->UDMA_mode]) | 8362 NFORCE_UDMA_EN(channel, drive) | 8363 NFORCE_UDMA_ENM(channel, drive); 8364 8365 mode = drvp->PIO_mode; 8366 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8367 (drvp->drive_flags & DRIVE_DMA) != 0) { 8368 /* Setup multiword DMA mode */ 8369 drvp->drive_flags &= ~DRIVE_UDMA; 8370 8371 /* mode = min(pio, dma + 2) */ 8372 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8373 mode = drvp->PIO_mode; 8374 else 8375 mode = drvp->DMA_mode + 2; 8376 } else { 8377 mode = drvp->PIO_mode; 8378 goto pio; 8379 } 8380 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8381 8382 pio: 8383 /* Setup PIO mode */ 8384 if (mode <= 2) { 8385 drvp->DMA_mode = 0; 8386 drvp->PIO_mode = 0; 8387 mode = 0; 8388 } else { 8389 drvp->PIO_mode = mode; 8390 drvp->DMA_mode = mode - 2; 8391 } 8392 piodmatim |= NFORCE_PIODMATIM_SET(channel, drive, 8393 nforce_pio[mode]); 8394 } 8395 8396 if (idedma_ctl != 0) { 8397 /* Add software bits in status register */ 8398 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8399 IDEDMA_CTL(channel), idedma_ctl); 8400 } 8401 8402 WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, " 8403 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8404 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8405 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim); 8406 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim); 8407 8408 
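/*
 * Only NFORCE_PIODMATIM and NFORCE_UDMATIM are written back here;
 * NFORCE_PIOTIM is read and shown in the debug output above but left
 * unchanged.
 */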
pciide_print_modes(cp); 8409 } 8410 8411 int 8412 nforce_pci_intr(void *arg) 8413 { 8414 struct pciide_softc *sc = arg; 8415 struct pciide_channel *cp; 8416 struct channel_softc *wdc_cp; 8417 int i, rv, crv; 8418 u_int32_t dmastat; 8419 8420 rv = 0; 8421 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 8422 cp = &sc->pciide_channels[i]; 8423 wdc_cp = &cp->wdc_channel; 8424 8425 /* Skip compat channel */ 8426 if (cp->compat) 8427 continue; 8428 8429 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8430 IDEDMA_CTL(i)); 8431 if ((dmastat & IDEDMA_CTL_INTR) == 0) 8432 continue; 8433 8434 crv = wdcintr(wdc_cp); 8435 if (crv == 0) 8436 printf("%s:%d: bogus intr\n", 8437 sc->sc_wdcdev.sc_dev.dv_xname, i); 8438 else 8439 rv = 1; 8440 } 8441 return (rv); 8442 } 8443 8444 void 8445 artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8446 { 8447 struct pciide_channel *cp; 8448 bus_size_t cmdsize, ctlsize; 8449 pcireg_t interface; 8450 int channel; 8451 8452 printf(": DMA"); 8453 #ifdef PCIIDE_I31244_DISABLEDMA 8454 if (sc->sc_rev == 0) { 8455 printf(" disabled due to rev. 0"); 8456 sc->sc_dma_ok = 0; 8457 } else 8458 #endif 8459 pciide_mapreg_dma(sc, pa); 8460 printf("\n"); 8461 8462 /* 8463 * XXX Configure LEDs to show activity. 8464 */ 8465 8466 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8467 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 8468 sc->sc_wdcdev.PIO_cap = 4; 8469 if (sc->sc_dma_ok) { 8470 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8471 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8472 sc->sc_wdcdev.irqack = pciide_irqack; 8473 sc->sc_wdcdev.DMA_cap = 2; 8474 sc->sc_wdcdev.UDMA_cap = 6; 8475 } 8476 sc->sc_wdcdev.set_modes = sata_setup_channel; 8477 8478 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8479 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8480 8481 interface = PCI_INTERFACE(pa->pa_class); 8482 8483 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8484 cp = &sc->pciide_channels[channel]; 8485 if (pciide_chansetup(sc, channel, interface) == 0) 8486 continue; 8487 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8488 pciide_pci_intr); 8489 if (cp->hw_ok == 0) 8490 continue; 8491 pciide_map_compat_intr(pa, cp, channel, interface); 8492 sata_setup_channel(&cp->wdc_channel); 8493 } 8494 } 8495 8496 void 8497 ite_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8498 { 8499 struct pciide_channel *cp; 8500 int channel; 8501 pcireg_t interface; 8502 bus_size_t cmdsize, ctlsize; 8503 pcireg_t cfg, modectl; 8504 8505 /* 8506 * Fake interface since IT8212F is claimed to be a ``RAID'' device. 
8507 */ 8508 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8509 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8510 8511 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8512 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8513 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8514 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8515 modectl & IT_MODE_MASK), DEBUG_PROBE); 8516 8517 printf(": DMA"); 8518 pciide_mapreg_dma(sc, pa); 8519 8520 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8521 WDC_CAPABILITY_MODE; 8522 if (sc->sc_dma_ok) { 8523 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8524 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8525 sc->sc_wdcdev.irqack = pciide_irqack; 8526 } 8527 sc->sc_wdcdev.PIO_cap = 4; 8528 sc->sc_wdcdev.DMA_cap = 2; 8529 sc->sc_wdcdev.UDMA_cap = 6; 8530 8531 sc->sc_wdcdev.set_modes = ite_setup_channel; 8532 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8533 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8534 8535 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8536 8537 /* Disable RAID */ 8538 modectl &= ~IT_MODE_RAID1; 8539 /* Disable CPU firmware mode */ 8540 modectl &= ~IT_MODE_CPU; 8541 8542 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8543 8544 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8545 cp = &sc->pciide_channels[channel]; 8546 8547 if (pciide_chansetup(sc, channel, interface) == 0) 8548 continue; 8549 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8550 pciide_pci_intr); 8551 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8552 } 8553 8554 /* Re-read configuration registers after channels setup */ 8555 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8556 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8557 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8558 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8559 modectl & IT_MODE_MASK), DEBUG_PROBE); 8560 } 8561 8562 void 8563 ite_setup_channel(struct channel_softc *chp) 8564 { 8565 struct ata_drive_datas *drvp; 8566 int drive, mode; 8567 u_int32_t idedma_ctl; 8568 struct pciide_channel *cp = (struct pciide_channel *)chp; 8569 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8570 int channel = chp->channel; 8571 pcireg_t cfg, modectl; 8572 pcireg_t tim; 8573 8574 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8575 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8576 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_TIM(channel)); 8577 WDCDEBUG_PRINT(("%s:%d: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8578 channel, tim), DEBUG_PROBE); 8579 8580 /* Setup DMA if needed */ 8581 pciide_channel_dma_setup(cp); 8582 8583 /* Clear all bits for this channel */ 8584 idedma_ctl = 0; 8585 8586 /* Per channel settings */ 8587 for (drive = 0; drive < 2; drive++) { 8588 drvp = &chp->ch_drive[drive]; 8589 8590 /* If no drive, skip */ 8591 if ((drvp->drive_flags & DRIVE) == 0) 8592 continue; 8593 8594 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8595 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8596 /* Setup UltraDMA mode */ 8597 drvp->drive_flags &= ~DRIVE_DMA; 8598 modectl &= ~IT_MODE_DMA(channel, drive); 8599 8600 #if 0 8601 /* Check cable, works only in CPU firmware mode */ 8602 if (drvp->UDMA_mode > 2 && 8603 (cfg & IT_CFG_CABLE(channel, drive)) == 0) { 8604 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 8605 "80-wire cable not detected\n", 8606 drvp->drive_name, 8607 sc->sc_wdcdev.sc_dev.dv_xname, 8608 channel, drive), DEBUG_PROBE); 8609 drvp->UDMA_mode = 2; 8610 } 8611 #endif 8612 
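/*
 * The IT_TIM_UDMA5 bit appears to select the faster UDMA clocking
 * needed for modes 5 and above; it is cleared for slower UDMA modes
 * (inferred from the check below, not from the IT8212F documentation).
 */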
8613 if (drvp->UDMA_mode >= 5) 8614 tim |= IT_TIM_UDMA5(drive); 8615 else 8616 tim &= ~IT_TIM_UDMA5(drive); 8617 8618 mode = drvp->PIO_mode; 8619 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8620 (drvp->drive_flags & DRIVE_DMA) != 0) { 8621 /* Setup multiword DMA mode */ 8622 drvp->drive_flags &= ~DRIVE_UDMA; 8623 modectl |= IT_MODE_DMA(channel, drive); 8624 8625 /* mode = min(pio, dma + 2) */ 8626 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8627 mode = drvp->PIO_mode; 8628 else 8629 mode = drvp->DMA_mode + 2; 8630 } else { 8631 mode = drvp->PIO_mode; 8632 goto pio; 8633 } 8634 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8635 8636 pio: 8637 /* Setup PIO mode */ 8638 if (mode <= 2) { 8639 drvp->DMA_mode = 0; 8640 drvp->PIO_mode = 0; 8641 mode = 0; 8642 } else { 8643 drvp->PIO_mode = mode; 8644 drvp->DMA_mode = mode - 2; 8645 } 8646 8647 /* Enable IORDY if PIO mode >= 3 */ 8648 if (drvp->PIO_mode >= 3) 8649 cfg |= IT_CFG_IORDY(channel); 8650 } 8651 8652 WDCDEBUG_PRINT(("%s: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8653 tim), DEBUG_PROBE); 8654 8655 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_CFG, cfg); 8656 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8657 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_TIM(channel), tim); 8658 8659 if (idedma_ctl != 0) { 8660 /* Add software bits in status register */ 8661 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8662 IDEDMA_CTL(channel), idedma_ctl); 8663 } 8664 8665 pciide_print_modes(cp); 8666 } 8667 8668 void 8669 ixp_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8670 { 8671 struct pciide_channel *cp; 8672 int channel; 8673 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8674 bus_size_t cmdsize, ctlsize; 8675 8676 printf(": DMA"); 8677 pciide_mapreg_dma(sc, pa); 8678 8679 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8680 WDC_CAPABILITY_MODE; 8681 if (sc->sc_dma_ok) { 8682 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8683 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8684 sc->sc_wdcdev.irqack = pciide_irqack; 8685 } 8686 sc->sc_wdcdev.PIO_cap = 4; 8687 sc->sc_wdcdev.DMA_cap = 2; 8688 sc->sc_wdcdev.UDMA_cap = 6; 8689 8690 sc->sc_wdcdev.set_modes = ixp_setup_channel; 8691 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8692 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8693 8694 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8695 8696 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8697 cp = &sc->pciide_channels[channel]; 8698 if (pciide_chansetup(sc, channel, interface) == 0) 8699 continue; 8700 pciide_map_compat_intr(pa, cp, channel, interface); 8701 if (cp->hw_ok == 0) 8702 continue; 8703 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8704 pciide_pci_intr); 8705 if (cp->hw_ok == 0) { 8706 pciide_unmap_compat_intr(pa, cp, channel, interface); 8707 continue; 8708 } 8709 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8710 } 8711 } 8712 8713 void 8714 ixp_setup_channel(struct channel_softc *chp) 8715 { 8716 struct ata_drive_datas *drvp; 8717 int drive, mode; 8718 u_int32_t idedma_ctl; 8719 struct pciide_channel *cp = (struct pciide_channel*)chp; 8720 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8721 int channel = chp->channel; 8722 pcireg_t udma, mdma_timing, pio, pio_timing; 8723 8724 pio_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING); 8725 pio = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL); 8726 mdma_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING); 8727 udma = pci_conf_read(sc->sc_pc, 
sc->sc_tag, IXP_UDMA_CTL); 8728 8729 /* Setup DMA if needed */ 8730 pciide_channel_dma_setup(cp); 8731 8732 idedma_ctl = 0; 8733 8734 /* Per channel settings */ 8735 for (drive = 0; drive < 2; drive++) { 8736 drvp = &chp->ch_drive[drive]; 8737 8738 /* If no drive, skip */ 8739 if ((drvp->drive_flags & DRIVE) == 0) 8740 continue; 8741 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8742 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8743 /* Setup UltraDMA mode */ 8744 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8745 IXP_UDMA_ENABLE(udma, chp->channel, drive); 8746 IXP_SET_MODE(udma, chp->channel, drive, 8747 drvp->UDMA_mode); 8748 mode = drvp->PIO_mode; 8749 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8750 (drvp->drive_flags & DRIVE_DMA) != 0) { 8751 /* Setup multiword DMA mode */ 8752 drvp->drive_flags &= ~DRIVE_UDMA; 8753 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8754 IXP_UDMA_DISABLE(udma, chp->channel, drive); 8755 IXP_SET_TIMING(mdma_timing, chp->channel, drive, 8756 ixp_mdma_timings[drvp->DMA_mode]); 8757 8758 /* mode = min(pio, dma + 2) */ 8759 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8760 mode = drvp->PIO_mode; 8761 else 8762 mode = drvp->DMA_mode + 2; 8763 } else { 8764 mode = drvp->PIO_mode; 8765 } 8766 8767 /* Setup PIO mode */ 8768 drvp->PIO_mode = mode; 8769 if (mode < 2) 8770 drvp->DMA_mode = 0; 8771 else 8772 drvp->DMA_mode = mode - 2; 8773 /* 8774 * Set PIO mode and timings 8775 * Linux driver avoids PIO mode 1, let's do it too. 8776 */ 8777 if (drvp->PIO_mode == 1) 8778 drvp->PIO_mode = 0; 8779 8780 IXP_SET_MODE(pio, chp->channel, drive, drvp->PIO_mode); 8781 IXP_SET_TIMING(pio_timing, chp->channel, drive, 8782 ixp_pio_timings[drvp->PIO_mode]); 8783 } 8784 8785 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL, udma); 8786 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING, mdma_timing); 8787 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL, pio); 8788 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING, pio_timing); 8789 8790 if (idedma_ctl != 0) { 8791 /* Add software bits in status register */ 8792 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8793 IDEDMA_CTL(channel), idedma_ctl); 8794 } 8795 8796 pciide_print_modes(cp); 8797 } 8798 8799 void 8800 jmicron_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8801 { 8802 struct pciide_channel *cp; 8803 int channel; 8804 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8805 bus_size_t cmdsize, ctlsize; 8806 u_int32_t conf; 8807 8808 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8809 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8810 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8811 8812 printf(": DMA"); 8813 pciide_mapreg_dma(sc, pa); 8814 8815 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8816 WDC_CAPABILITY_MODE; 8817 if (sc->sc_dma_ok) { 8818 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8819 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8820 sc->sc_wdcdev.irqack = pciide_irqack; 8821 } 8822 sc->sc_wdcdev.PIO_cap = 4; 8823 sc->sc_wdcdev.DMA_cap = 2; 8824 sc->sc_wdcdev.UDMA_cap = 6; 8825 sc->sc_wdcdev.set_modes = jmicron_setup_channel; 8826 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8827 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8828 8829 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8830 8831 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8832 cp = &sc->pciide_channels[channel]; 8833 8834 if (pciide_chansetup(sc, channel, interface) == 0) 8835 continue; 8836 8837 #if 0 8838 if ((conf & 
JMICRON_CHAN_EN(channel)) == 0) { 8839 printf("%s: %s ignored (disabled)\n", 8840 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8841 cp->hw_ok = 0; 8842 continue; 8843 } 8844 #endif 8845 8846 pciide_map_compat_intr(pa, cp, channel, interface); 8847 if (cp->hw_ok == 0) 8848 continue; 8849 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8850 pciide_pci_intr); 8851 if (cp->hw_ok == 0) { 8852 pciide_unmap_compat_intr(pa, cp, channel, interface); 8853 continue; 8854 } 8855 8856 if (pciide_chan_candisable(cp)) { 8857 conf &= ~JMICRON_CHAN_EN(channel); 8858 pciide_unmap_compat_intr(pa, cp, channel, interface); 8859 continue; 8860 } 8861 8862 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8863 } 8864 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8865 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8866 pci_conf_write(sc->sc_pc, sc->sc_tag, JMICRON_CONF, conf); 8867 } 8868 8869 void 8870 jmicron_setup_channel(struct channel_softc *chp) 8871 { 8872 struct ata_drive_datas *drvp; 8873 int drive, mode; 8874 u_int32_t idedma_ctl; 8875 struct pciide_channel *cp = (struct pciide_channel *)chp; 8876 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8877 int channel = chp->channel; 8878 u_int32_t conf; 8879 8880 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8881 8882 /* Setup DMA if needed */ 8883 pciide_channel_dma_setup(cp); 8884 8885 /* Clear all bits for this channel */ 8886 idedma_ctl = 0; 8887 8888 /* Per channel settings */ 8889 for (drive = 0; drive < 2; drive++) { 8890 drvp = &chp->ch_drive[drive]; 8891 8892 /* If no drive, skip */ 8893 if ((drvp->drive_flags & DRIVE) == 0) 8894 continue; 8895 8896 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8897 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8898 /* Setup UltraDMA mode */ 8899 drvp->drive_flags &= ~DRIVE_DMA; 8900 8901 /* see if cable is up to scratch */ 8902 if ((conf & JMICRON_CONF_40PIN) && 8903 (drvp->UDMA_mode > 2)) 8904 drvp->UDMA_mode = 2; 8905 8906 mode = drvp->PIO_mode; 8907 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8908 (drvp->drive_flags & DRIVE_DMA) != 0) { 8909 /* Setup multiword DMA mode */ 8910 drvp->drive_flags &= ~DRIVE_UDMA; 8911 8912 /* mode = min(pio, dma + 2) */ 8913 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8914 mode = drvp->PIO_mode; 8915 else 8916 mode = drvp->DMA_mode + 2; 8917 } else { 8918 mode = drvp->PIO_mode; 8919 goto pio; 8920 } 8921 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8922 8923 pio: 8924 /* Setup PIO mode */ 8925 if (mode <= 2) { 8926 drvp->DMA_mode = 0; 8927 drvp->PIO_mode = 0; 8928 } else { 8929 drvp->PIO_mode = mode; 8930 drvp->DMA_mode = mode - 2; 8931 } 8932 } 8933 8934 if (idedma_ctl != 0) { 8935 /* Add software bits in status register */ 8936 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8937 IDEDMA_CTL(channel), idedma_ctl); 8938 } 8939 8940 pciide_print_modes(cp); 8941 } 8942 8943 void 8944 phison_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8945 { 8946 struct pciide_channel *cp; 8947 int channel; 8948 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8949 bus_size_t cmdsize, ctlsize; 8950 8951 sc->chip_unmap = default_chip_unmap; 8952 8953 printf(": DMA"); 8954 pciide_mapreg_dma(sc, pa); 8955 8956 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8957 WDC_CAPABILITY_MODE; 8958 if (sc->sc_dma_ok) { 8959 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8960 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8961 sc->sc_wdcdev.irqack = pciide_irqack; 8962 } 8963 sc->sc_wdcdev.PIO_cap = 4; 8964 
sc->sc_wdcdev.DMA_cap = 2; 8965 sc->sc_wdcdev.UDMA_cap = 5; 8966 sc->sc_wdcdev.set_modes = phison_setup_channel; 8967 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8968 sc->sc_wdcdev.nchannels = 1; 8969 8970 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8971 8972 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8973 cp = &sc->pciide_channels[channel]; 8974 8975 if (pciide_chansetup(sc, channel, interface) == 0) 8976 continue; 8977 8978 pciide_map_compat_intr(pa, cp, channel, interface); 8979 if (cp->hw_ok == 0) 8980 continue; 8981 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8982 pciide_pci_intr); 8983 if (cp->hw_ok == 0) { 8984 pciide_unmap_compat_intr(pa, cp, channel, interface); 8985 continue; 8986 } 8987 8988 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8989 } 8990 } 8991 8992 void 8993 phison_setup_channel(struct channel_softc *chp) 8994 { 8995 struct ata_drive_datas *drvp; 8996 int drive, mode; 8997 u_int32_t idedma_ctl; 8998 struct pciide_channel *cp = (struct pciide_channel *)chp; 8999 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 9000 int channel = chp->channel; 9001 9002 /* Setup DMA if needed */ 9003 pciide_channel_dma_setup(cp); 9004 9005 /* Clear all bits for this channel */ 9006 idedma_ctl = 0; 9007 9008 /* Per channel settings */ 9009 for (drive = 0; drive < 2; drive++) { 9010 drvp = &chp->ch_drive[drive]; 9011 9012 /* If no drive, skip */ 9013 if ((drvp->drive_flags & DRIVE) == 0) 9014 continue; 9015 9016 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 9017 (drvp->drive_flags & DRIVE_UDMA) != 0) { 9018 /* Setup UltraDMA mode */ 9019 drvp->drive_flags &= ~DRIVE_DMA; 9020 mode = drvp->PIO_mode; 9021 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 9022 (drvp->drive_flags & DRIVE_DMA) != 0) { 9023 /* Setup multiword DMA mode */ 9024 drvp->drive_flags &= ~DRIVE_UDMA; 9025 9026 /* mode = min(pio, dma + 2) */ 9027 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 9028 mode = drvp->PIO_mode; 9029 else 9030 mode = drvp->DMA_mode + 2; 9031 } else { 9032 mode = drvp->PIO_mode; 9033 goto pio; 9034 } 9035 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 9036 9037 pio: 9038 /* Setup PIO mode */ 9039 if (mode <= 2) { 9040 drvp->DMA_mode = 0; 9041 drvp->PIO_mode = 0; 9042 } else { 9043 drvp->PIO_mode = mode; 9044 drvp->DMA_mode = mode - 2; 9045 } 9046 } 9047 9048 if (idedma_ctl != 0) { 9049 /* Add software bits in status register */ 9050 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 9051 IDEDMA_CTL(channel), idedma_ctl); 9052 } 9053 9054 pciide_print_modes(cp); 9055 } 9056 9057 void 9058 sch_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 9059 { 9060 struct pciide_channel *cp; 9061 int channel; 9062 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 9063 bus_size_t cmdsize, ctlsize; 9064 9065 printf(": DMA"); 9066 pciide_mapreg_dma(sc, pa); 9067 9068 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 9069 WDC_CAPABILITY_MODE; 9070 if (sc->sc_dma_ok) { 9071 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 9072 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 9073 sc->sc_wdcdev.irqack = pciide_irqack; 9074 } 9075 sc->sc_wdcdev.PIO_cap = 4; 9076 sc->sc_wdcdev.DMA_cap = 2; 9077 sc->sc_wdcdev.UDMA_cap = 5; 9078 sc->sc_wdcdev.set_modes = sch_setup_channel; 9079 sc->sc_wdcdev.channels = sc->wdc_chanarray; 9080 sc->sc_wdcdev.nchannels = 1; 9081 9082 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 9083 9084 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 9085 cp = 
void
sch_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	bus_size_t cmdsize, ctlsize;

	printf(": DMA");
	pciide_mapreg_dma(sc, pa);

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 5;
	sc->sc_wdcdev.set_modes = sch_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 1;

	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];

		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0) {
			pciide_unmap_compat_intr(pa, cp, channel, interface);
			continue;
		}

		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}
}

void
sch_setup_channel(struct channel_softc *chp)
{
	struct ata_drive_datas *drvp;
	int drive, mode;
	u_int32_t tim, timaddr;
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/* Setup DMA if needed */
	pciide_channel_dma_setup(cp);

	/* Per channel settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];

		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;

		timaddr = (drive == 0) ? SCH_D0TIM : SCH_D1TIM;
		tim = pci_conf_read(sc->sc_pc, sc->sc_tag, timaddr);
		tim &= ~SCH_TIM_MASK;

		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) != 0) {
			/* Setup UltraDMA mode */
			drvp->drive_flags &= ~DRIVE_DMA;

			mode = drvp->PIO_mode;
			tim |= (drvp->UDMA_mode << 16) | SCH_TIM_SYNCDMA;
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 &&
		    (drvp->drive_flags & DRIVE_DMA) != 0) {
			/* Setup multiword DMA mode */
			drvp->drive_flags &= ~DRIVE_UDMA;

			tim &= ~SCH_TIM_SYNCDMA;

			/* mode = min(pio, dma + 2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		} else {
			mode = drvp->PIO_mode;
			goto pio;
		}

pio:
		/* Setup PIO mode */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		tim |= (drvp->DMA_mode << 8) | (drvp->PIO_mode);
		pci_conf_write(sc->sc_pc, sc->sc_tag, timaddr, tim);
	}

	pciide_print_modes(cp);
}

void
rdc_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t patr;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	bus_size_t cmdsize, ctlsize;

	printf(": DMA");
	pciide_mapreg_dma(sc, pa);
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA |
		    WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.dma_init = pciide_dma_init;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 5;
	sc->sc_wdcdev.set_modes = rdc_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);

	WDCDEBUG_PRINT(("rdc_chip_map: old PATR=0x%x, "
	    "PSD1ATR=0x%x, UDCCR=0x%x, IIOCR=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR)),
	    DEBUG_PROBE);

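	/* Attach the channels; a channel left disabled in PATR is skipped. */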
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];

		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		patr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR);
		if ((patr & RDCIDE_PATR_EN(channel)) == 0) {
			printf("%s: %s ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			cp->hw_ok = 0;
			continue;
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			goto next;
		if (pciide_chan_candisable(cp)) {
			patr &= ~RDCIDE_PATR_EN(channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_PATR,
			    patr);
		}
		if (cp->hw_ok == 0)
			goto next;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
next:
		if (cp->hw_ok == 0)
			pciide_unmap_compat_intr(pa, cp, channel, interface);
	}

	WDCDEBUG_PRINT(("rdc_chip_map: PATR=0x%x, "
	    "PSD1ATR=0x%x, UDCCR=0x%x, IIOCR=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR)),
	    DEBUG_PROBE);
}

void
rdc_setup_channel(struct channel_softc *chp)
{
	u_int8_t drive;
	u_int32_t patr, psd1atr, udccr, iiocr;
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	patr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR);
	psd1atr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR);
	udccr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR);
	iiocr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/* clear modes */
	patr = patr & (RDCIDE_PATR_EN(0) | RDCIDE_PATR_EN(1));
	psd1atr &= ~RDCIDE_PSD1ATR_SETUP_MASK(chp->channel);
	psd1atr &= ~RDCIDE_PSD1ATR_HOLD_MASK(chp->channel);
	for (drive = 0; drive < 2; drive++) {
		udccr &= ~RDCIDE_UDCCR_EN(chp->channel, drive);
		udccr &= ~RDCIDE_UDCCR_TIM_MASK(chp->channel, drive);
		iiocr &= ~RDCIDE_IIOCR_CLK_MASK(chp->channel, drive);
	}
	/* now setup modes */
	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (drvp->drive_flags & DRIVE_ATAPI)
			patr |= RDCIDE_PATR_ATA(chp->channel, drive);
		if (drive == 0) {
			patr |= RDCIDE_PATR_SETUP(rdcide_setup[drvp->PIO_mode],
			    chp->channel);
			patr |= RDCIDE_PATR_HOLD(rdcide_hold[drvp->PIO_mode],
			    chp->channel);
		} else {
			patr |= RDCIDE_PATR_DEV1_TEN(chp->channel);
			psd1atr |= RDCIDE_PSD1ATR_SETUP(
			    rdcide_setup[drvp->PIO_mode],
			    chp->channel);
			psd1atr |= RDCIDE_PSD1ATR_HOLD(
			    rdcide_hold[drvp->PIO_mode],
			    chp->channel);
		}
		if (drvp->PIO_mode > 0) {
			patr |= RDCIDE_PATR_FTIM(chp->channel, drive);
			patr |= RDCIDE_PATR_IORDY(chp->channel, drive);
		}
		if (drvp->drive_flags & DRIVE_DMA)
			patr |= RDCIDE_PATR_DMAEN(chp->channel, drive);
		if ((drvp->drive_flags & DRIVE_UDMA) == 0)
			continue;

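		/* cap at UDMA2 unless an 80-conductor cable is detected */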
		if ((iiocr & RDCIDE_IIOCR_CABLE(chp->channel, drive)) == 0
		    && drvp->UDMA_mode > 2)
			drvp->UDMA_mode = 2;
		udccr |= RDCIDE_UDCCR_EN(chp->channel, drive);
		udccr |= RDCIDE_UDCCR_TIM(rdcide_udmatim[drvp->UDMA_mode],
		    chp->channel, drive);
		iiocr |= RDCIDE_IIOCR_CLK(rdcide_udmaclk[drvp->UDMA_mode],
		    chp->channel, drive);
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_PATR, patr);
	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR, psd1atr);
	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR, udccr);
	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR, iiocr);
}