/*	$OpenBSD: pciide.c,v 1.293 2009/02/07 02:09:01 jsg Exp $	*/
/*	$NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christopher G. Demetriou
 *	for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10

#ifdef WDCDEBUG
#ifndef WDCDEBUG_PCIIDE_MASK
#define WDCDEBUG_PCIIDE_MASK 0x00
#endif
int wdcdebug_pciide_mask = WDCDEBUG_PCIIDE_MASK;
#define WDCDEBUG_PRINT(args, level) do {		\
	if ((wdcdebug_pciide_mask & (level)) != 0)	\
		printf args;				\
} while (0)
#else
#define WDCDEBUG_PRINT(args, level)
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <machine/bus.h>
#include <machine/endian.h>

#include <dev/ata/atavar.h>
#include <dev/ata/satareg.h>
#include <dev/ic/wdcreg.h>
#include <dev/ic/wdcvar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_sii3112_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/pciide_acard_reg.h>
#include <dev/pci/pciide_natsemi_reg.h>
#include <dev/pci/pciide_nforce_reg.h>
#include <dev/pci/pciide_i31244_reg.h>
#include <dev/pci/pciide_ite_reg.h>
#include <dev/pci/pciide_ixp_reg.h>
#include <dev/pci/pciide_svwsata_reg.h>
#include <dev/pci/pciide_jmicron_reg.h>
#include <dev/pci/cy82c693var.h>

/* functions for reading/writing 8-bit PCI registers */

u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t, int);
void pciide_pci_write(pci_chipset_tag_t, pcitag_t, int, u_int8_t);

u_int8_t
pciide_pci_read(pci_chipset_tag_t pc, pcitag_t pa, int reg)
{
	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

void
pciide_pci_write(pci_chipset_tag_t pc, pcitag_t pa, int reg, u_int8_t val)
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}
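/*
 * Example (illustrative sketch only, not compiled): the helpers above
 * emulate byte-wide config cycles on top of the 32-bit pci_conf_read()/
 * pci_conf_write() interface.  For a hypothetical byte register at config
 * offset 0x41, the dword at offset 0x40 is accessed and the byte of
 * interest lives at bit position (0x41 & 0x03) * 8 == 8:
 *
 *	u_int8_t v = pciide_pci_read(pc, tag, 0x41);
 *	pciide_pci_write(pc, tag, 0x41, v | 0x80);
 *
 * The write is a read-modify-write of the whole dword, so unrelated bytes
 * sharing that dword are preserved.
 */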
void default_chip_map(struct pciide_softc *, struct pci_attach_args *);

void sata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sata_setup_channel(struct channel_softc *);

void piix_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piixsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piix_setup_channel(struct channel_softc *);
void piix3_4_setup_channel(struct channel_softc *);
void piix_timing_debug(struct pciide_softc *);

u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t);
u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas *);
u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t);

void amd756_chip_map(struct pciide_softc *, struct pci_attach_args *);
void amd756_setup_channel(struct channel_softc *);

void apollo_chip_map(struct pciide_softc *, struct pci_attach_args *);
void apollo_setup_channel(struct channel_softc *);

void cmd_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd0643_9_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd0643_9_setup_channel(struct channel_softc *);
void cmd680_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd680_setup_channel(struct channel_softc *);
void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int);
void cmd_channel_map(struct pci_attach_args *, struct pciide_softc *, int);
int cmd_pci_intr(void *);
void cmd646_9_irqack(struct channel_softc *);

void sii_fixup_cacheline(struct pciide_softc *, struct pci_attach_args *);
void sii3112_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sii3112_setup_channel(struct channel_softc *);
void sii3112_drv_probe(struct channel_softc *);
void sii3114_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sii3114_mapreg_dma(struct pciide_softc *, struct pci_attach_args *);
int sii3114_chansetup(struct pciide_softc *, int);
void sii3114_mapchan(struct pciide_channel *);
u_int8_t sii3114_dmacmd_read(struct pciide_softc *, int);
void sii3114_dmacmd_write(struct pciide_softc *, int, u_int8_t);
u_int8_t sii3114_dmactl_read(struct pciide_softc *, int);
void sii3114_dmactl_write(struct pciide_softc *, int, u_int8_t);
void sii3114_dmatbl_write(struct pciide_softc *, int, u_int32_t);

void cy693_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cy693_setup_channel(struct channel_softc *);

void sis_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sis_setup_channel(struct channel_softc *);
void sis96x_setup_channel(struct channel_softc *);
int sis_hostbr_match(struct pci_attach_args *);
int sis_south_match(struct pci_attach_args *);

void natsemi_chip_map(struct pciide_softc *, struct pci_attach_args *);
void natsemi_setup_channel(struct channel_softc *);
int natsemi_pci_intr(void *);
void natsemi_irqack(struct channel_softc *);
void ns_scx200_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ns_scx200_setup_channel(struct channel_softc *);

void acer_chip_map(struct pciide_softc *, struct pci_attach_args *);
void acer_setup_channel(struct channel_softc *);
int acer_pci_intr(void *);

void pdc202xx_chip_map(struct pciide_softc *, struct pci_attach_args *);
void pdc202xx_setup_channel(struct channel_softc *);
void pdc20268_setup_channel(struct channel_softc *);
int pdc202xx_pci_intr(void *);
int pdc20265_pci_intr(void *);
void pdc20262_dma_start(void *, int, int);
int pdc20262_dma_finish(void *, int, int, int);

u_int8_t pdc268_config_read(struct channel_softc *, int);

void pdcsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void pdc203xx_setup_channel(struct channel_softc *);
int pdc203xx_pci_intr(void *);
void pdc203xx_irqack(struct channel_softc *);
void pdc203xx_dma_start(void *, int, int);
int pdc203xx_dma_finish(void *, int, int, int);
int pdc205xx_pci_intr(void *);
void pdc205xx_do_reset(struct channel_softc *);
void pdc205xx_drv_probe(struct channel_softc *);

void opti_chip_map(struct pciide_softc *, struct pci_attach_args *);
void opti_setup_channel(struct channel_softc *);

void hpt_chip_map(struct pciide_softc *, struct pci_attach_args *);
void hpt_setup_channel(struct channel_softc *);
int hpt_pci_intr(void *);

void acard_chip_map(struct pciide_softc *, struct pci_attach_args *);
void acard_setup_channel(struct channel_softc *);

void serverworks_chip_map(struct pciide_softc *, struct pci_attach_args *);
void serverworks_setup_channel(struct channel_softc *);
int serverworks_pci_intr(void *);

void svwsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void svwsata_mapreg_dma(struct pciide_softc *, struct pci_attach_args *);
void svwsata_mapchan(struct pciide_channel *);
u_int8_t svwsata_dmacmd_read(struct pciide_softc *, int);
void svwsata_dmacmd_write(struct pciide_softc *, int, u_int8_t);
u_int8_t svwsata_dmactl_read(struct pciide_softc *, int);
void svwsata_dmactl_write(struct pciide_softc *, int, u_int8_t);
void svwsata_dmatbl_write(struct pciide_softc *, int, u_int32_t);
void svwsata_drv_probe(struct channel_softc *);

void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *);
void nforce_setup_channel(struct channel_softc *);
int nforce_pci_intr(void *);

void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *);

void ite_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ite_setup_channel(struct channel_softc *);

void ixp_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ixp_setup_channel(struct channel_softc *);

void jmicron_chip_map(struct pciide_softc *, struct pci_attach_args *);
void jmicron_setup_channel(struct channel_softc *);

struct pciide_product_desc {
	u_int32_t ide_product;
	u_short ide_flags;
	/* map and setup chip, probe drives */
	void (*chip_map)(struct pciide_softc *, struct pci_attach_args *);
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
#define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARs ignore upper word */

/* Default product description for devices not known to this driver */
const struct pciide_product_desc default_product_desc = {
	0,				/* Generic PCI IDE controller */
	0,
	default_chip_map
};
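/*
 * Illustrative note: each supported controller gets one descriptor like
 * the default one above.  For example, the CMD Technology PCI0680 entry
 * further down sets IDE_PCI_CLASS_OVERRIDE, so pciide_match() accepts the
 * device on product ID alone, before it ever looks at the class code in
 * pa_class.
 */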
const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_31244,	/* Intel 31244 SATA */
	  0, artisea_chip_map },
	{ PCI_PRODUCT_INTEL_82092AA,	/* Intel 82092AA IDE */
	  0, default_chip_map },
	{ PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82371FB_ISA, /* Intel 82371FB IDE (PIIX) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82372FB_IDE, /* Intel 82372FB IDE (PIIX4) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82371MX,	/* Intel 82371MX IDE */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82451NX,	/* Intel 82451NX (PIIX4) IDE */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801CAM_IDE, /* Intel 82801CAM IDE (ICH3) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801CA_IDE, /* Intel 82801CA IDE (ICH3) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801DB_IDE, /* Intel 82801DB IDE (ICH4) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801DBL_IDE, /* Intel 82801DBL IDE (ICH4-L) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801DBM_IDE, /* Intel 82801DBM IDE (ICH4-M) */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801EB_IDE, /* Intel 82801EB/ER (ICH5/5R) IDE */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801EB_SATA, /* Intel 82801EB (ICH5) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801ER_SATA, /* Intel 82801ER (ICH5R) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_6300ESB_IDE, /* Intel 6300ESB IDE */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_6300ESB_SATA, /* Intel 6300ESB SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_6300ESB_SATA2, /* Intel 6300ESB SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_6321ESB_IDE, /* Intel 6321ESB IDE */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801FB_IDE, /* Intel 82801FB (ICH6) IDE */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801FBM_SATA, /* Intel 82801FBM (ICH6M) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801FB_SATA, /* Intel 82801FB (ICH6) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801FR_SATA, /* Intel 82801FR (ICH6R) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801GB_IDE, /* Intel 82801GB (ICH7) IDE */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801GB_SATA, /* Intel 82801GB (ICH7) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801GR_AHCI, /* Intel 82801GR (ICH7R) AHCI */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801GR_RAID, /* Intel 82801GR (ICH7R) RAID */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801GBM_SATA, /* Intel 82801GBM (ICH7M) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801GBM_AHCI, /* Intel 82801GBM (ICH7M) AHCI */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801GHM_RAID, /* Intel 82801GHM (ICH7M DH) RAID */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801H_SATA_1, /* Intel 82801H (ICH8) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801H_AHCI_6P, /* Intel 82801H (ICH8) AHCI */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801H_RAID, /* Intel 82801H (ICH8) RAID */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801H_AHCI_4P, /* Intel 82801H (ICH8) AHCI */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801H_SATA_2, /* Intel 82801H (ICH8) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801HBM_SATA, /* Intel 82801HBM (ICH8M) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801HBM_AHCI, /* Intel 82801HBM (ICH8M) AHCI */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801HBM_RAID, /* Intel 82801HBM (ICH8M) RAID */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801HBM_IDE, /* Intel 82801HBM (ICH8M) IDE */
	  0, piix_chip_map },
	{ PCI_PRODUCT_INTEL_82801I_SATA_1, /* Intel 82801I (ICH9) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801I_SATA_2, /* Intel 82801I (ICH9) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801I_SATA_3, /* Intel 82801I (ICH9) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801I_SATA_4, /* Intel 82801I (ICH9) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801I_SATA_5, /* Intel 82801I (ICH9M) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_82801I_SATA_6, /* Intel 82801I (ICH9M) SATA */
	  0, piixsata_chip_map },
	{ PCI_PRODUCT_INTEL_6321ESB_SATA, /* Intel 6321ESB SATA */
	  0, piixsata_chip_map }
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,	/* AMD 756 */
	  0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_766_IDE,	/* AMD 766 */
	  0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_PBC768_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_8111_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_CS5536_IDE, 0, amd756_chip_map }
};

#ifdef notyet
const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621, 0, opti_chip_map },
	{ PCI_PRODUCT_OPTI_82C568, 0, opti_chip_map },
	{ PCI_PRODUCT_OPTI_82D568, 0, opti_chip_map }
};
#endif

const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,	/* CMD Technology PCI0640 */
	  0, cmd_chip_map },
	{ PCI_PRODUCT_CMDTECH_643,	/* CMD Technology PCI0643 */
	  0, cmd0643_9_chip_map },
	{ PCI_PRODUCT_CMDTECH_646,	/* CMD Technology PCI0646 */
	  0, cmd0643_9_chip_map },
	{ PCI_PRODUCT_CMDTECH_648,	/* CMD Technology PCI0648 */
	  0, cmd0643_9_chip_map },
	{ PCI_PRODUCT_CMDTECH_649,	/* CMD Technology PCI0649 */
	  0, cmd0643_9_chip_map },
	{ PCI_PRODUCT_CMDTECH_680,	/* CMD Technology PCI0680 */
	  IDE_PCI_CLASS_OVERRIDE, cmd680_chip_map },
	{ PCI_PRODUCT_CMDTECH_3112,	/* SiI3112 SATA */
	  0, sii3112_chip_map },
	{ PCI_PRODUCT_CMDTECH_3512,	/* SiI3512 SATA */
	  0, sii3112_chip_map },
	{ PCI_PRODUCT_CMDTECH_AAR_1210SA, /* Adaptec AAR-1210SA */
	  0, sii3112_chip_map },
	{ PCI_PRODUCT_CMDTECH_3114,	/* SiI3114 SATA */
	  0, sii3114_chip_map }
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C416,	/* VIA VT82C416 IDE */
	  0, apollo_chip_map },
	{ PCI_PRODUCT_VIATECH_VT82C571,	/* VIA VT82C571 IDE */
	  0, apollo_chip_map },
	{ PCI_PRODUCT_VIATECH_VT6410,	/* VIA VT6410 IDE */
	  IDE_PCI_CLASS_OVERRIDE, apollo_chip_map },
	{ PCI_PRODUCT_VIATECH_CX700_IDE, /* VIA CX700 IDE */
	  0, apollo_chip_map },
	{ PCI_PRODUCT_VIATECH_VX700_IDE, /* VIA VX700 IDE */
	  0, apollo_chip_map },
	{ PCI_PRODUCT_VIATECH_VT6420_SATA, /* VIA VT6420 SATA */
	  0, sata_chip_map },
	{ PCI_PRODUCT_VIATECH_VT6421_SATA, /* VIA VT6421 SATA */
	  0, sata_chip_map },
	{ PCI_PRODUCT_VIATECH_VT8237A_SATA, /* VIA VT8237A SATA */
	  0, sata_chip_map },
	{ PCI_PRODUCT_VIATECH_VT8237A_SATA_2, /* VIA VT8237A SATA */
	  0, sata_chip_map },
	{ PCI_PRODUCT_VIATECH_VT8237S_SATA, /* VIA VT8237S SATA */
	  0, sata_chip_map },
	{ PCI_PRODUCT_VIATECH_VT8251_SATA, /* VIA VT8251 SATA */
	  0, sata_chip_map }
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693,	/* Contaq CY82C693 IDE */
	  IDE_16BIT_IOSPACE, cy693_chip_map }
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5513,		/* SIS 5513 EIDE */
	  0, sis_chip_map },
	{ PCI_PRODUCT_SIS_180,		/* SIS 180 SATA */
	  0, sata_chip_map },
	{ PCI_PRODUCT_SIS_181,		/* SIS 181 SATA */
	  0, sata_chip_map },
	{ PCI_PRODUCT_SIS_182,		/* SIS 182 SATA */
	  0, sata_chip_map }
};

/*
 * The National/AMD CS5535 requires MSRs to set DMA/PIO modes so it
 * has been banished to the MD i386 pciide_machdep
 */
const struct pciide_product_desc pciide_natsemi_products[] = {
#ifdef __i386__
	{ PCI_PRODUCT_NS_CS5535_IDE,	/* National/AMD CS5535 IDE */
	  0, gcsc_chip_map },
#endif
	{ PCI_PRODUCT_NS_PC87415,	/* National Semi PC87415 IDE */
	  0, natsemi_chip_map },
	{ PCI_PRODUCT_NS_SCx200_IDE,	/* National Semi SCx200 IDE */
	  0, ns_scx200_chip_map }
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,	/* Acer Labs M5229 UDMA IDE */
	  0, acer_chip_map }
};

const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366,	/* Highpoint HPT36x/37x IDE */
	  IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },
	{ PCI_PRODUCT_TRIONES_HPT372A,	/* Highpoint HPT372A IDE */
	  IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },
	{ PCI_PRODUCT_TRIONES_HPT302,	/* Highpoint HPT302 IDE */
	  IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },
	{ PCI_PRODUCT_TRIONES_HPT371,	/* Highpoint HPT371 IDE */
	  IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },
	{ PCI_PRODUCT_TRIONES_HPT374,	/* Highpoint HPT374 IDE */
	  IDE_PCI_CLASS_OVERRIDE, hpt_chip_map }
};

const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_PDC20246,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20262,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20265,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20267,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20268,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20268R,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20269,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20271,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20275,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20276,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20277,
	  IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20318,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20319,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20371,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20375,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20376,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20377,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20378,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20379,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40518,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40519,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40718,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40719,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40779,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20571,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20575,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20579,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20771,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20775,
	  IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map }
};

const struct pciide_product_desc pciide_acard_products[] = {
	{ PCI_PRODUCT_ACARD_ATP850U,	/* Acard ATP850U Ultra33 Controller */
	  IDE_PCI_CLASS_OVERRIDE, acard_chip_map },
	{ PCI_PRODUCT_ACARD_ATP860,	/* Acard ATP860 Ultra66 Controller */
	  IDE_PCI_CLASS_OVERRIDE, acard_chip_map },
	{ PCI_PRODUCT_ACARD_ATP860A,	/* Acard ATP860-A Ultra66 Controller */
	  IDE_PCI_CLASS_OVERRIDE, acard_chip_map },
	{ PCI_PRODUCT_ACARD_ATP865A,	/* Acard ATP865-A Ultra133 Controller */
	  IDE_PCI_CLASS_OVERRIDE, acard_chip_map },
	{ PCI_PRODUCT_ACARD_ATP865R,	/* Acard ATP865-R Ultra133 Controller */
	  IDE_PCI_CLASS_OVERRIDE, acard_chip_map }
};

const struct pciide_product_desc pciide_serverworks_products[] = {
	{ PCI_PRODUCT_RCC_OSB4_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB5_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB6_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB6_RAID_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_K2_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_FRODO4_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_FRODO8_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_SATA_1, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_SATA_2, 0, svwsata_chip_map }
};

const struct pciide_product_desc pciide_nvidia_products[] = {
	{ PCI_PRODUCT_NVIDIA_NFORCE_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_400_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_ATA133, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP73_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP77_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_400_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_SATA1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA4, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA4, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_4, 0, sata_chip_map }
};

const struct pciide_product_desc pciide_ite_products[] = {
	{ PCI_PRODUCT_ITEXPRESS_IT8211F,
	  IDE_PCI_CLASS_OVERRIDE, ite_chip_map },
	{ PCI_PRODUCT_ITEXPRESS_IT8212F,
	  IDE_PCI_CLASS_OVERRIDE, ite_chip_map }
};

const struct pciide_product_desc pciide_ati_products[] = {
	{ PCI_PRODUCT_ATI_SB200_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB300_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB400_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB600_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB700_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB300_SATA, 0, sii3112_chip_map },
	{ PCI_PRODUCT_ATI_SB400_SATA_1, 0, sii3112_chip_map },
	{ PCI_PRODUCT_ATI_SB400_SATA_2, 0, sii3112_chip_map }
};

const struct pciide_product_desc pciide_jmicron_products[] = {
	{ PCI_PRODUCT_JMICRON_JMB361, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB363, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB365, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB366, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB368, 0, jmicron_chip_map }
};

struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
	int ide_nproducts;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products,
	  sizeof(pciide_intel_products)/sizeof(pciide_intel_products[0]) },
	{ PCI_VENDOR_AMD, pciide_amd_products,
	  sizeof(pciide_amd_products)/sizeof(pciide_amd_products[0]) },
#ifdef notyet
	{ PCI_VENDOR_OPTI, pciide_opti_products,
	  sizeof(pciide_opti_products)/sizeof(pciide_opti_products[0]) },
#endif
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products,
	  sizeof(pciide_cmd_products)/sizeof(pciide_cmd_products[0]) },
	{ PCI_VENDOR_VIATECH, pciide_via_products,
	  sizeof(pciide_via_products)/sizeof(pciide_via_products[0]) },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products,
	  sizeof(pciide_cypress_products)/sizeof(pciide_cypress_products[0]) },
	{ PCI_VENDOR_SIS, pciide_sis_products,
	  sizeof(pciide_sis_products)/sizeof(pciide_sis_products[0]) },
	{ PCI_VENDOR_NS, pciide_natsemi_products,
	  sizeof(pciide_natsemi_products)/sizeof(pciide_natsemi_products[0]) },
	{ PCI_VENDOR_ALI, pciide_acer_products,
	  sizeof(pciide_acer_products)/sizeof(pciide_acer_products[0]) },
	{ PCI_VENDOR_TRIONES, pciide_triones_products,
	  sizeof(pciide_triones_products)/sizeof(pciide_triones_products[0]) },
	{ PCI_VENDOR_ACARD, pciide_acard_products,
	  sizeof(pciide_acard_products)/sizeof(pciide_acard_products[0]) },
	{ PCI_VENDOR_RCC, pciide_serverworks_products,
	  sizeof(pciide_serverworks_products)/sizeof(pciide_serverworks_products[0]) },
	{ PCI_VENDOR_PROMISE, pciide_promise_products,
	  sizeof(pciide_promise_products)/sizeof(pciide_promise_products[0]) },
	{ PCI_VENDOR_NVIDIA, pciide_nvidia_products,
	  sizeof(pciide_nvidia_products)/sizeof(pciide_nvidia_products[0]) },
	{ PCI_VENDOR_ITEXPRESS, pciide_ite_products,
	  sizeof(pciide_ite_products)/sizeof(pciide_ite_products[0]) },
	{ PCI_VENDOR_ATI, pciide_ati_products,
	  sizeof(pciide_ati_products)/sizeof(pciide_ati_products[0]) },
	{ PCI_VENDOR_JMICRON, pciide_jmicron_products,
	  sizeof(pciide_jmicron_products)/sizeof(pciide_jmicron_products[0]) }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01

int	pciide_match(struct device *, void *, void *);
void	pciide_attach(struct device *, struct device *, void *);

struct cfattach pciide_pci_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};

struct cfattach pciide_jmb_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};

struct cfdriver pciide_cd = {
	NULL, "pciide", DV_DULL
};

const struct pciide_product_desc *pciide_lookup_product(u_int32_t);
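/*
 * Sketch (hypothetical IDs, not part of the driver): supporting an
 * additional controller normally means adding a product entry to the
 * matching vendor table above, e.g.
 *
 *	{ PCI_PRODUCT_FOO_BAR_IDE, 0, default_chip_map },
 *
 * and, for a vendor not listed yet, a pciide_vendors[] line pointing at
 * the new table.  pciide_lookup_product() below walks these two levels,
 * vendor first and then product, to pick the chip_map routine used at
 * attach time.
 */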
const struct pciide_product_desc *
pciide_lookup_product(u_int32_t id)
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;
	int i;

	for (i = 0, vp = pciide_vendors;
	    i < sizeof(pciide_vendors)/sizeof(pciide_vendors[0]);
	    vp++, i++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if (i == sizeof(pciide_vendors)/sizeof(pciide_vendors[0]))
		return (NULL);

	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (i == vp->ide_nproducts)
		return (NULL);
	return (pp);
}

int
pciide_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Some IDE controllers have severe bugs when used in PCI mode.
	 * We punt and attach them to the ISA bus instead.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
		return (0);

	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers.  Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE))
		return (1);

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE) {
		switch (PCI_SUBCLASS(pa->pa_class)) {
		case PCI_SUBCLASS_MASS_STORAGE_IDE:
			return (1);

		/*
		 * We only match these if we know they have
		 * a match, as we may not support native interfaces
		 * on them.
		 */
		case PCI_SUBCLASS_MASS_STORAGE_SATA:
		case PCI_SUBCLASS_MASS_STORAGE_RAID:
		case PCI_SUBCLASS_MASS_STORAGE_MISC:
			if (pp)
				return (1);
			else
				return (0);
			break;
		}
	}

	return (0);
}

void
pciide_attach(struct device *parent, struct device *self, void *aux)
{
	struct pciide_softc *sc = (struct pciide_softc *)self;
	struct pci_attach_args *pa = aux;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL)
		sc->sc_pp = &default_product_desc;
	sc->sc_rev = PCI_REVISION(pa->pa_class);

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Set up DMA defaults; these might be adjusted by chip_map. */
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;

	sc->sc_dmacmd_read = pciide_dmacmd_read;
	sc->sc_dmacmd_write = pciide_dmacmd_write;
	sc->sc_dmactl_read = pciide_dmactl_read;
	sc->sc_dmactl_write = pciide_dmactl_write;
	sc->sc_dmatbl_write = pciide_dmatbl_write;

	WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc,
	    sc->sc_tag, pa->pa_class), DEBUG_PROBE);

	sc->sc_pp->chip_map(sc, pa);

	WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG)),
	    DEBUG_PROBE);
}

int
pciide_mapregs_compat(struct pci_attach_args *pa, struct pciide_channel *cp,
    int compatchan, bus_size_t *cmdsizep, bus_size_t *ctlsizep)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	pcireg_t csr;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MASTER_ENABLE);

	wdc_cp->cmd_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}

	return (1);
}

int
pciide_mapregs_native(struct pci_attach_args *pa, struct pciide_channel *cp,
    bus_size_t *cmdsizep, bus_size_t *ctlsizep, int (*pci_intr)(void *))
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;
	pcireg_t maptype;

	cp->compat = 0;

	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return (0);
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc,
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return (0);
		}
	}
	cp->ih = sc->sc_pci_ih;

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CMD_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CTL_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O": "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return (0);
	}
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register, the control register is at offset 2.  Pass the generic
	 * code a handle for only one byte at the right offset.
	 */
	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdc_cp->ctl_ioh) != 0) {
		printf("%s: unable to subregion %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
		return (0);
	}
	return (1);
}
void
pciide_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done.  If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space", some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", unused (registers at unsafe address %#lx)",
			    addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL, 0) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", (unsupported maptype 0x%x)", maptype);
		break;
	}
}

int
pciide_intr_flag(struct pciide_channel *cp)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int chan = cp->wdc_channel.channel;

	if (cp->dma_in_progress) {
		int retry = 10;
		int status;

		/* Check the status register */
		for (retry = 10; retry > 0; retry--) {
			status = PCIIDE_DMACTL_READ(sc, chan);
			if (status & IDEDMA_CTL_INTR) {
				break;
			}
			DELAY(5);
		}

		/* Not for us. */
		if (retry == 0)
			return (0);

		return (1);
	}

	return (-1);
}

int
pciide_compat_intr(void *arg)
{
	struct pciide_channel *cp = arg;

	if (pciide_intr_flag(cp) == 0)
		return (0);

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}

int
pciide_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel skip. */
		if (cp->compat)
			continue;

		if (pciide_intr_flag(cp) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

u_int8_t
pciide_dmacmd_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan)));
}

void
pciide_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan), val);
}

u_int8_t
pciide_dmactl_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan)));
}

void
pciide_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan), val);
}

void
pciide_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val)
{
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL(chan), val);
}

void
pciide_channel_dma_setup(struct pciide_channel *cp)
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}
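/*
 * Note (summary of the Bus Master IDE specification cited at the top of
 * this file): the pciide_dmacmd_*/dmactl_*/dmatbl_* accessors above index
 * an 8-byte register block per channel inside the 16-byte region mapped
 * by pciide_mapreg_dma():
 *
 *	IDEDMA_CMD(chan)  bus master command register
 *	IDEDMA_CTL(chan)  bus master status register
 *	IDEDMA_TBL(chan)  physical region descriptor table pointer
 *
 * which is why pciide_dma_init() below only has to program a table
 * address and a direction bit before pciide_dma_start() sets
 * IDEDMA_CMD_START.
 */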
int
pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive)
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return (0);

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
	    seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		printf("%s:%d: unable to create table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to load table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
	/* Create an xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
	    NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		printf("%s:%d: unable to create xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}
	return (0);
}

int
pciide_dma_init(void *v, int channel, int drive, void *databuf,
    size_t datalen, int flags)
{
	struct pciide_softc *sc = v;
	int error, seg;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];
#ifndef BUS_DMA_RAW
#define BUS_DMA_RAW 0
#endif

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW);
	if (error) {
		printf("%s:%d: unable to load xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, letoh32(dma_maps->dma_table[seg].byte_count),
		    letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Maps are ready.  Start DMA function */
#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
		    dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif

	/* Clear status bits */
	PCIIDE_DMACTL_WRITE(sc, channel, PCIIDE_DMACTL_READ(sc, channel));
	/* Write table addr */
	PCIIDE_DMATBL_WRITE(sc, channel,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	PCIIDE_DMACMD_WRITE(sc, channel,
	    ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd);
	/* remember flags */
	dma_maps->dma_flags = flags;
	return (0);
}

void
pciide_dma_start(void *v, int channel, int drive)
{
	struct pciide_softc *sc = v;

	WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS);
	PCIIDE_DMACMD_WRITE(sc, channel, PCIIDE_DMACMD_READ(sc, channel) |
	    IDEDMA_CMD_START);

	sc->pciide_channels[channel].dma_in_progress = 1;
}
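/*
 * Worked example (illustrative values only): for a read spanning two DMA
 * segments, 0x1000 bytes at physical 0x00200000 and 0x200 bytes at
 * 0x00300000, the loop in pciide_dma_init() above builds this physical
 * region descriptor table, with IDEDMA_BYTE_COUNT_EOT marking the last
 * entry:
 *
 *	entry 0: base_addr 0x00200000  byte_count 0x1000
 *	entry 1: base_addr 0x00300000  byte_count 0x0200 | EOT
 *
 * Both fields are stored little-endian (htole32), which is what the bus
 * master IDE engine expects regardless of host byte order.
 */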
int
pciide_dma_finish(void *v, int channel, int drive, int force)
{
	struct pciide_softc *sc = v;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	u_int8_t status;
	int error = 0;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	status = PCIIDE_DMACTL_READ(sc, channel);
	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) {
		error = WDC_DMAST_NOIRQ;
		goto done;
	}

	/* stop DMA channel */
	PCIIDE_DMACMD_WRITE(sc, channel,
	    ((dma_maps->dma_flags & WDC_DMA_READ) ?
	    0x00 : IDEDMA_CMD_WRITE) | cp->idedma_cmd);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	/* Clear status bits */
	PCIIDE_DMACTL_WRITE(sc, channel, status);

	if ((status & IDEDMA_CTL_ERR) != 0) {
		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
		error |= WDC_DMAST_ERR;
	}

	if ((status & IDEDMA_CTL_INTR) == 0) {
		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}

done:
	sc->pciide_channels[channel].dma_in_progress = 0;
	return (error);
}

void
pciide_irqack(struct channel_softc *chp)
{
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int chan = chp->channel;

	/* clear status bits in IDE DMA registers */
	PCIIDE_DMACTL_WRITE(sc, chan, PCIIDE_DMACTL_READ(sc, chan));
}

/* some common code used by several chip_map */
int
pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];

	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.channel = channel;
	cp->wdc_channel.wdc = &sc->sc_wdcdev;
	cp->wdc_channel.ch_queue =
	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		printf("%s: %s "
		    "cannot allocate memory for command queue",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}
	cp->hw_ok = 1;

	return (1);
}

/* some common code used by several chip channel_map */
void
pciide_mapchan(struct pci_attach_args *pa, struct pciide_channel *cp,
    pcireg_t interface, bus_size_t *cmdsizep, bus_size_t *ctlsizep,
    int (*pci_intr)(void *))
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
		    pci_intr);
	else
		cp->hw_ok = pciide_mapregs_compat(pa, cp,
		    wdc_cp->channel, cmdsizep, ctlsizep);
	if (cp->hw_ok == 0)
		return;
	wdc_cp->data32iot = wdc_cp->cmd_iot;
	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
	wdcattach(wdc_cp);
}

/*
 * Generic code to call to know if a channel can be disabled.  Return 1
 * if channel can be disabled, 0 if not
 */
int
pciide_chan_candisable(struct pciide_channel *cp)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
		printf("%s: %s disabled (no drives)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
		return (1);
	}
	return (0);
}
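/*
 * Sketch of how the helpers above are typically combined by the
 * per-chipset chip_map routines later in this file (schematic only;
 * error handling and chipset-specific timing setup omitted):
 *
 *	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
 *		cp = &sc->pciide_channels[channel];
 *		if (pciide_chansetup(sc, channel, interface) == 0)
 *			continue;
 *		pciide_map_compat_intr(pa, cp, channel, interface);
 *		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
 *		    pci_intr);
 *		if (pciide_chan_candisable(cp))
 *			;	// chipset-specific channel disable
 *	}
 *
 * default_chip_map() below is a more elaborate variation on this pattern,
 * mapping the channel registers directly instead of via pciide_mapchan().
 */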
1936 * Set hw_ok=0 on failure 1937 */ 1938 void 1939 pciide_map_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 1940 int compatchan, int interface) 1941 { 1942 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1943 struct channel_softc *wdc_cp = &cp->wdc_channel; 1944 1945 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1946 return; 1947 1948 cp->compat = 1; 1949 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 1950 pa, compatchan, pciide_compat_intr, cp); 1951 if (cp->ih == NULL) { 1952 printf("%s: no compatibility interrupt for use by %s\n", 1953 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1954 cp->hw_ok = 0; 1955 } 1956 } 1957 1958 /* 1959 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel. 1960 * Set hw_ok=0 on failure 1961 */ 1962 void 1963 pciide_unmap_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 1964 int compatchan, int interface) 1965 { 1966 struct channel_softc *wdc_cp = &cp->wdc_channel; 1967 1968 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1969 return; 1970 1971 pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih); 1972 } 1973 1974 void 1975 pciide_print_channels(int nchannels, pcireg_t interface) 1976 { 1977 int i; 1978 1979 for (i = 0; i < nchannels; i++) { 1980 printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i), 1981 (interface & PCIIDE_INTERFACE_SETTABLE(i)) ? 1982 "configured" : "wired", 1983 (interface & PCIIDE_INTERFACE_PCI(i)) ? "native-PCI" : 1984 "compatibility"); 1985 } 1986 1987 printf("\n"); 1988 } 1989 1990 void 1991 pciide_print_modes(struct pciide_channel *cp) 1992 { 1993 wdc_print_current_modes(&cp->wdc_channel); 1994 } 1995 1996 void 1997 default_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 1998 { 1999 struct pciide_channel *cp; 2000 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2001 pcireg_t csr; 2002 int channel, drive; 2003 struct ata_drive_datas *drvp; 2004 u_int8_t idedma_ctl; 2005 bus_size_t cmdsize, ctlsize; 2006 char *failreason; 2007 2008 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2009 printf(": DMA"); 2010 if (sc->sc_pp == &default_product_desc && 2011 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 2012 PCIIDE_OPTIONS_DMA) == 0) { 2013 printf(" (unsupported)"); 2014 sc->sc_dma_ok = 0; 2015 } else { 2016 pciide_mapreg_dma(sc, pa); 2017 if (sc->sc_dma_ok != 0) 2018 printf(", (partial support)"); 2019 } 2020 } else { 2021 printf(": no DMA"); 2022 sc->sc_dma_ok = 0; 2023 } 2024 if (sc->sc_dma_ok) { 2025 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2026 sc->sc_wdcdev.irqack = pciide_irqack; 2027 } 2028 sc->sc_wdcdev.PIO_cap = 0; 2029 sc->sc_wdcdev.DMA_cap = 0; 2030 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2031 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2032 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 2033 2034 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2035 2036 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2037 cp = &sc->pciide_channels[channel]; 2038 if (pciide_chansetup(sc, channel, interface) == 0) 2039 continue; 2040 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 2041 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2042 &ctlsize, pciide_pci_intr); 2043 } else { 2044 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2045 channel, &cmdsize, &ctlsize); 2046 } 2047 if (cp->hw_ok == 0) 2048 continue; 2049 /* 2050 * Check to see if something appears to be there. 
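 * wdcprobe() below pokes the channel registers; a second probe with
 * the controller's PCI I/O decoding disabled then confirms that the
 * response really came from this device.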
2051 */ 2052 failreason = NULL; 2053 pciide_map_compat_intr(pa, cp, channel, interface); 2054 if (cp->hw_ok == 0) 2055 continue; 2056 if (!wdcprobe(&cp->wdc_channel)) { 2057 failreason = "not responding; disabled or no drives?"; 2058 goto next; 2059 } 2060 /* 2061 * Now, make sure it's actually attributable to this PCI IDE 2062 * channel by trying to access the channel again while the 2063 * PCI IDE controller's I/O space is disabled. (If the 2064 * channel no longer appears to be there, it belongs to 2065 * this controller.) YUCK! 2066 */ 2067 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 2068 PCI_COMMAND_STATUS_REG); 2069 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 2070 csr & ~PCI_COMMAND_IO_ENABLE); 2071 if (wdcprobe(&cp->wdc_channel)) 2072 failreason = "other hardware responding at addresses"; 2073 pci_conf_write(sc->sc_pc, sc->sc_tag, 2074 PCI_COMMAND_STATUS_REG, csr); 2075 next: 2076 if (failreason) { 2077 printf("%s: %s ignored (%s)\n", 2078 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2079 failreason); 2080 cp->hw_ok = 0; 2081 pciide_unmap_compat_intr(pa, cp, channel, interface); 2082 bus_space_unmap(cp->wdc_channel.cmd_iot, 2083 cp->wdc_channel.cmd_ioh, cmdsize); 2084 if (interface & PCIIDE_INTERFACE_PCI(channel)) 2085 bus_space_unmap(cp->wdc_channel.ctl_iot, 2086 cp->ctl_baseioh, ctlsize); 2087 else 2088 bus_space_unmap(cp->wdc_channel.ctl_iot, 2089 cp->wdc_channel.ctl_ioh, ctlsize); 2090 } 2091 if (cp->hw_ok) { 2092 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2093 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2094 wdcattach(&cp->wdc_channel); 2095 } 2096 } 2097 2098 if (sc->sc_dma_ok == 0) 2099 return; 2100 2101 /* Allocate DMA maps */ 2102 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2103 idedma_ctl = 0; 2104 cp = &sc->pciide_channels[channel]; 2105 for (drive = 0; drive < 2; drive++) { 2106 drvp = &cp->wdc_channel.ch_drive[drive]; 2107 /* If no drive, skip */ 2108 if ((drvp->drive_flags & DRIVE) == 0) 2109 continue; 2110 if ((drvp->drive_flags & DRIVE_DMA) == 0) 2111 continue; 2112 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 2113 /* Abort DMA setup */ 2114 printf("%s:%d:%d: cannot allocate DMA maps, " 2115 "using PIO transfers\n", 2116 sc->sc_wdcdev.sc_dev.dv_xname, 2117 channel, drive); 2118 drvp->drive_flags &= ~DRIVE_DMA; 2119 } 2120 printf("%s:%d:%d: using DMA data transfers\n", 2121 sc->sc_wdcdev.sc_dev.dv_xname, 2122 channel, drive); 2123 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2124 } 2125 if (idedma_ctl != 0) { 2126 /* Add software bits in status register */ 2127 PCIIDE_DMACTL_WRITE(sc, channel, idedma_ctl); 2128 } 2129 } 2130 } 2131 2132 void 2133 sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2134 { 2135 struct pciide_channel *cp; 2136 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2137 int channel; 2138 bus_size_t cmdsize, ctlsize; 2139 2140 if (interface == 0) { 2141 WDCDEBUG_PRINT(("sata_chip_map interface == 0\n"), 2142 DEBUG_PROBE); 2143 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2144 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 2145 } 2146 2147 printf(": DMA"); 2148 pciide_mapreg_dma(sc, pa); 2149 printf("\n"); 2150 2151 if (sc->sc_dma_ok) { 2152 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2153 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2154 sc->sc_wdcdev.irqack = pciide_irqack; 2155 } 2156 sc->sc_wdcdev.PIO_cap = 4; 2157 sc->sc_wdcdev.DMA_cap = 2; 2158 sc->sc_wdcdev.UDMA_cap = 6; 2159 2160 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2161 sc->sc_wdcdev.nchannels = 
PCIIDE_NUM_CHANNELS; 2162 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2163 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2164 sc->sc_wdcdev.set_modes = sata_setup_channel; 2165 2166 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2167 cp = &sc->pciide_channels[channel]; 2168 if (pciide_chansetup(sc, channel, interface) == 0) 2169 continue; 2170 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2171 pciide_pci_intr); 2172 sata_setup_channel(&cp->wdc_channel); 2173 } 2174 } 2175 2176 void 2177 sata_setup_channel(struct channel_softc *chp) 2178 { 2179 struct ata_drive_datas *drvp; 2180 int drive; 2181 u_int32_t idedma_ctl; 2182 struct pciide_channel *cp = (struct pciide_channel *)chp; 2183 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2184 2185 /* setup DMA if needed */ 2186 pciide_channel_dma_setup(cp); 2187 2188 idedma_ctl = 0; 2189 2190 for (drive = 0; drive < 2; drive++) { 2191 drvp = &chp->ch_drive[drive]; 2192 /* If no drive, skip */ 2193 if ((drvp->drive_flags & DRIVE) == 0) 2194 continue; 2195 if (drvp->drive_flags & DRIVE_UDMA) { 2196 /* use Ultra/DMA */ 2197 drvp->drive_flags &= ~DRIVE_DMA; 2198 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2199 } else if (drvp->drive_flags & DRIVE_DMA) { 2200 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2201 } 2202 } 2203 2204 /* 2205 * Nothing to do to setup modes; it is meaningless in S-ATA 2206 * (but many S-ATA drives still want to get the SET_FEATURE 2207 * command). 2208 */ 2209 if (idedma_ctl != 0) { 2210 /* Add software bits in status register */ 2211 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 2212 } 2213 pciide_print_modes(cp); 2214 } 2215 2216 void 2217 piix_timing_debug(struct pciide_softc *sc) 2218 { 2219 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 2220 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 2221 DEBUG_PROBE); 2222 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE && 2223 sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_ISA) { 2224 WDCDEBUG_PRINT((", sidetim=0x%x", 2225 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 2226 DEBUG_PROBE); 2227 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2228 WDCDEBUG_PRINT((", udmareg 0x%x", 2229 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 2230 DEBUG_PROBE); 2231 } 2232 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2233 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2234 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2235 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2236 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2237 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2238 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2239 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2240 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2241 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2242 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2243 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2244 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2245 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2246 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2247 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2248 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 2249 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 2250 DEBUG_PROBE); 2251 } 2252 } 2253 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 2254 } 2255 2256 void 2257 
piix_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2258 { 2259 struct pciide_channel *cp; 2260 int channel; 2261 u_int32_t idetim; 2262 bus_size_t cmdsize, ctlsize; 2263 2264 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2265 2266 printf(": DMA"); 2267 pciide_mapreg_dma(sc, pa); 2268 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2269 WDC_CAPABILITY_MODE; 2270 if (sc->sc_dma_ok) { 2271 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2272 sc->sc_wdcdev.irqack = pciide_irqack; 2273 switch (sc->sc_pp->ide_product) { 2274 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2275 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2276 case PCI_PRODUCT_INTEL_82371AB_IDE: 2277 case PCI_PRODUCT_INTEL_82372FB_IDE: 2278 case PCI_PRODUCT_INTEL_82440MX_IDE: 2279 case PCI_PRODUCT_INTEL_82451NX: 2280 case PCI_PRODUCT_INTEL_82801AA_IDE: 2281 case PCI_PRODUCT_INTEL_82801AB_IDE: 2282 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2283 case PCI_PRODUCT_INTEL_82801BA_IDE: 2284 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2285 case PCI_PRODUCT_INTEL_82801CA_IDE: 2286 case PCI_PRODUCT_INTEL_82801DB_IDE: 2287 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2288 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2289 case PCI_PRODUCT_INTEL_82801EB_IDE: 2290 case PCI_PRODUCT_INTEL_82801FB_IDE: 2291 case PCI_PRODUCT_INTEL_82801GB_IDE: 2292 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2293 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2294 break; 2295 } 2296 } 2297 sc->sc_wdcdev.PIO_cap = 4; 2298 sc->sc_wdcdev.DMA_cap = 2; 2299 switch (sc->sc_pp->ide_product) { 2300 case PCI_PRODUCT_INTEL_82801AA_IDE: 2301 case PCI_PRODUCT_INTEL_82372FB_IDE: 2302 sc->sc_wdcdev.UDMA_cap = 4; 2303 break; 2304 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2305 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2306 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2307 case PCI_PRODUCT_INTEL_82801BA_IDE: 2308 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2309 case PCI_PRODUCT_INTEL_82801CA_IDE: 2310 case PCI_PRODUCT_INTEL_82801DB_IDE: 2311 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2312 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2313 case PCI_PRODUCT_INTEL_82801EB_IDE: 2314 case PCI_PRODUCT_INTEL_82801FB_IDE: 2315 case PCI_PRODUCT_INTEL_82801GB_IDE: 2316 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2317 sc->sc_wdcdev.UDMA_cap = 5; 2318 break; 2319 default: 2320 sc->sc_wdcdev.UDMA_cap = 2; 2321 break; 2322 } 2323 2324 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE || 2325 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_ISA) { 2326 sc->sc_wdcdev.set_modes = piix_setup_channel; 2327 } else { 2328 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 2329 } 2330 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2331 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2332 2333 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2334 2335 piix_timing_debug(sc); 2336 2337 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2338 cp = &sc->pciide_channels[channel]; 2339 2340 /* PIIX is compat-only */ 2341 if (pciide_chansetup(sc, channel, 0) == 0) 2342 continue; 2343 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2344 if ((PIIX_IDETIM_READ(idetim, channel) & 2345 PIIX_IDETIM_IDE) == 0) { 2346 printf("%s: %s ignored (disabled)\n", 2347 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2348 continue; 2349 } 2350 /* PIIX are compat-only pciide devices */ 2351 pciide_map_compat_intr(pa, cp, channel, 0); 2352 if (cp->hw_ok == 0) 2353 continue; 2354 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr); 2355 if (cp->hw_ok == 0) 2356 goto next; 2357 if (pciide_chan_candisable(cp)) { 2358 idetim = 
PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 2359 channel); 2360 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 2361 idetim); 2362 } 2363 if (cp->hw_ok == 0) 2364 goto next; 2365 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2366 next: 2367 if (cp->hw_ok == 0) 2368 pciide_unmap_compat_intr(pa, cp, channel, 0); 2369 } 2370 2371 piix_timing_debug(sc); 2372 } 2373 2374 void 2375 piixsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2376 { 2377 struct pciide_channel *cp; 2378 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2379 int channel; 2380 bus_size_t cmdsize, ctlsize; 2381 u_int8_t reg, ich = 0; 2382 2383 printf(": DMA"); 2384 pciide_mapreg_dma(sc, pa); 2385 2386 if (sc->sc_dma_ok) { 2387 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2388 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2389 sc->sc_wdcdev.irqack = pciide_irqack; 2390 sc->sc_wdcdev.DMA_cap = 2; 2391 sc->sc_wdcdev.UDMA_cap = 6; 2392 } 2393 sc->sc_wdcdev.PIO_cap = 4; 2394 2395 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2396 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2397 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2398 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2399 sc->sc_wdcdev.set_modes = sata_setup_channel; 2400 2401 switch(sc->sc_pp->ide_product) { 2402 case PCI_PRODUCT_INTEL_6300ESB_SATA: 2403 case PCI_PRODUCT_INTEL_6300ESB_SATA2: 2404 case PCI_PRODUCT_INTEL_82801EB_SATA: 2405 case PCI_PRODUCT_INTEL_82801ER_SATA: 2406 ich = 5; 2407 break; 2408 case PCI_PRODUCT_INTEL_82801FB_SATA: 2409 case PCI_PRODUCT_INTEL_82801FR_SATA: 2410 case PCI_PRODUCT_INTEL_82801FBM_SATA: 2411 ich = 6; 2412 break; 2413 default: 2414 ich = 7; 2415 break; 2416 } 2417 2418 /* 2419 * Put the SATA portion of controllers that don't operate in combined 2420 * mode into native PCI modes so the maximum number of devices can be 2421 * used. 
Intel calls this "enhanced mode" 2422 */ 2423 if (ich == 5) { 2424 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP); 2425 if ((reg & ICH5_SATA_MAP_COMBINED) == 0) { 2426 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2427 ICH5_SATA_PI); 2428 reg |= ICH5_SATA_PI_PRI_NATIVE | 2429 ICH5_SATA_PI_SEC_NATIVE; 2430 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2431 ICH5_SATA_PI, reg); 2432 interface |= PCIIDE_INTERFACE_PCI(0) | 2433 PCIIDE_INTERFACE_PCI(1); 2434 } 2435 } else { 2436 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP) & 2437 ICH6_SATA_MAP_CMB_MASK; 2438 if (reg != ICH6_SATA_MAP_CMB_PRI && 2439 reg != ICH6_SATA_MAP_CMB_SEC) { 2440 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2441 ICH5_SATA_PI); 2442 reg |= ICH5_SATA_PI_PRI_NATIVE | 2443 ICH5_SATA_PI_SEC_NATIVE; 2444 2445 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2446 ICH5_SATA_PI, reg); 2447 interface |= PCIIDE_INTERFACE_PCI(0) | 2448 PCIIDE_INTERFACE_PCI(1); 2449 2450 /* 2451 * Ask for SATA IDE Mode, we don't need to do this 2452 * for the combined mode case as combined mode is 2453 * only allowed in IDE Mode 2454 */ 2455 if (ich >= 7) { 2456 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 2457 ICH5_SATA_MAP) & ~ICH7_SATA_MAP_SMS_MASK; 2458 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2459 ICH5_SATA_MAP, reg); 2460 } 2461 } 2462 } 2463 2464 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2465 2466 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2467 cp = &sc->pciide_channels[channel]; 2468 if (pciide_chansetup(sc, channel, interface) == 0) 2469 continue; 2470 2471 pciide_map_compat_intr(pa, cp, channel, interface); 2472 if (cp->hw_ok == 0) 2473 continue; 2474 2475 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2476 pciide_pci_intr); 2477 if (cp->hw_ok != 0) 2478 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2479 2480 if (cp->hw_ok == 0) 2481 pciide_unmap_compat_intr(pa, cp, channel, interface); 2482 } 2483 } 2484 2485 void 2486 piix_setup_channel(struct channel_softc *chp) 2487 { 2488 u_int8_t mode[2], drive; 2489 u_int32_t oidetim, idetim, idedma_ctl; 2490 struct pciide_channel *cp = (struct pciide_channel *)chp; 2491 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2492 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 2493 2494 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2495 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 2496 idedma_ctl = 0; 2497 2498 /* set up new idetim: Enable IDE registers decode */ 2499 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 2500 chp->channel); 2501 2502 /* setup DMA */ 2503 pciide_channel_dma_setup(cp); 2504 2505 /* 2506 * Here we have to mess up with drives mode: PIIX can't have 2507 * different timings for master and slave drives. 2508 * We need to find the best combination. 
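 * The logic below: if both drives do DMA, both get the lower DMA mode;
 * if only one does DMA, it keeps its mode and the other drops to PIO 0
 * when the timings clash; otherwise the slower of the two PIO modes is
 * used, except that a drive below PIO 2 is dropped to PIO 0 while the
 * other keeps its own mode.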
2509 */
2510
2511 /* If both drives support DMA, take the lower mode */
2512 if ((drvp[0].drive_flags & DRIVE_DMA) &&
2513 (drvp[1].drive_flags & DRIVE_DMA)) {
2514 mode[0] = mode[1] =
2515 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
2516 drvp[0].DMA_mode = mode[0];
2517 drvp[1].DMA_mode = mode[1];
2518 goto ok;
2519 }
2520 /*
2521 * If only one drive supports DMA, use its mode, and
2522 * put the other one in PIO mode 0 if its mode is not compatible
2523 */
2524 if (drvp[0].drive_flags & DRIVE_DMA) {
2525 mode[0] = drvp[0].DMA_mode;
2526 mode[1] = drvp[1].PIO_mode;
2527 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
2528 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
2529 mode[1] = drvp[1].PIO_mode = 0;
2530 goto ok;
2531 }
2532 if (drvp[1].drive_flags & DRIVE_DMA) {
2533 mode[1] = drvp[1].DMA_mode;
2534 mode[0] = drvp[0].PIO_mode;
2535 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
2536 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
2537 mode[0] = drvp[0].PIO_mode = 0;
2538 goto ok;
2539 }
2540 /*
2541 * If neither drive uses DMA, take the lower mode, unless
2542 * one of them is PIO mode < 2
2543 */
2544 if (drvp[0].PIO_mode < 2) {
2545 mode[0] = drvp[0].PIO_mode = 0;
2546 mode[1] = drvp[1].PIO_mode;
2547 } else if (drvp[1].PIO_mode < 2) {
2548 mode[1] = drvp[1].PIO_mode = 0;
2549 mode[0] = drvp[0].PIO_mode;
2550 } else {
2551 mode[0] = mode[1] =
2552 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
2553 drvp[0].PIO_mode = mode[0];
2554 drvp[1].PIO_mode = mode[1];
2555 }
2556 ok: /* The modes are set up */
2557 for (drive = 0; drive < 2; drive++) {
2558 if (drvp[drive].drive_flags & DRIVE_DMA) {
2559 idetim |= piix_setup_idetim_timings(
2560 mode[drive], 1, chp->channel);
2561 goto end;
2562 }
2563 }
2564 /* If we get here, none of the drives use DMA */
2565 if (mode[0] >= 2)
2566 idetim |= piix_setup_idetim_timings(
2567 mode[0], 0, chp->channel);
2568 else
2569 idetim |= piix_setup_idetim_timings(
2570 mode[1], 0, chp->channel);
2571 end: /*
2572 * timing mode is now set up in the controller.
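 * (the ISP/RTC timing fields of IDETIM are shared by both drives on a
 * channel, which is why a single timing was chosen above; the
 * DTE/PPE/IE/TIME bits set next are per-drive)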
Enable 2573 * it per-drive 2574 */ 2575 for (drive = 0; drive < 2; drive++) { 2576 /* If no drive, skip */ 2577 if ((drvp[drive].drive_flags & DRIVE) == 0) 2578 continue; 2579 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 2580 if (drvp[drive].drive_flags & DRIVE_DMA) 2581 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2582 } 2583 if (idedma_ctl != 0) { 2584 /* Add software bits in status register */ 2585 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2586 IDEDMA_CTL(chp->channel), 2587 idedma_ctl); 2588 } 2589 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2590 pciide_print_modes(cp); 2591 } 2592 2593 void 2594 piix3_4_setup_channel(struct channel_softc *chp) 2595 { 2596 struct ata_drive_datas *drvp; 2597 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 2598 struct pciide_channel *cp = (struct pciide_channel *)chp; 2599 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2600 int drive; 2601 int channel = chp->channel; 2602 2603 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2604 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 2605 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 2606 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 2607 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 2608 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 2609 PIIX_SIDETIM_RTC_MASK(channel)); 2610 2611 idedma_ctl = 0; 2612 /* If channel disabled, no need to go further */ 2613 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 2614 return; 2615 /* set up new idetim: Enable IDE registers decode */ 2616 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 2617 2618 /* setup DMA if needed */ 2619 pciide_channel_dma_setup(cp); 2620 2621 for (drive = 0; drive < 2; drive++) { 2622 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 2623 PIIX_UDMATIM_SET(0x3, channel, drive)); 2624 drvp = &chp->ch_drive[drive]; 2625 /* If no drive, skip */ 2626 if ((drvp->drive_flags & DRIVE) == 0) 2627 continue; 2628 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2629 (drvp->drive_flags & DRIVE_UDMA) == 0)) 2630 goto pio; 2631 2632 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2633 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2634 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2635 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2636 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2637 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2638 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2639 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2640 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2641 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2642 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2643 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2644 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2645 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2646 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2647 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2648 ideconf |= PIIX_CONFIG_PINGPONG; 2649 } 2650 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2651 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2652 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2653 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 2654 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 2655 
sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2656 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2657 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2658 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2659 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2660 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2661 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2662 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) { 2663 /* setup Ultra/100 */ 2664 if (drvp->UDMA_mode > 2 && 2665 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2666 drvp->UDMA_mode = 2; 2667 if (drvp->UDMA_mode > 4) { 2668 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 2669 } else { 2670 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 2671 if (drvp->UDMA_mode > 2) { 2672 ideconf |= PIIX_CONFIG_UDMA66(channel, 2673 drive); 2674 } else { 2675 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 2676 drive); 2677 } 2678 } 2679 } 2680 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2681 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2682 /* setup Ultra/66 */ 2683 if (drvp->UDMA_mode > 2 && 2684 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2685 drvp->UDMA_mode = 2; 2686 if (drvp->UDMA_mode > 2) 2687 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 2688 else 2689 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 2690 } 2691 2692 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2693 (drvp->drive_flags & DRIVE_UDMA)) { 2694 /* use Ultra/DMA */ 2695 drvp->drive_flags &= ~DRIVE_DMA; 2696 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 2697 udmareg |= PIIX_UDMATIM_SET( 2698 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 2699 } else { 2700 /* use Multiword DMA */ 2701 drvp->drive_flags &= ~DRIVE_UDMA; 2702 if (drive == 0) { 2703 idetim |= piix_setup_idetim_timings( 2704 drvp->DMA_mode, 1, channel); 2705 } else { 2706 sidetim |= piix_setup_sidetim_timings( 2707 drvp->DMA_mode, 1, channel); 2708 idetim =PIIX_IDETIM_SET(idetim, 2709 PIIX_IDETIM_SITRE, channel); 2710 } 2711 } 2712 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2713 2714 pio: /* use PIO mode */ 2715 idetim |= piix_setup_idetim_drvs(drvp); 2716 if (drive == 0) { 2717 idetim |= piix_setup_idetim_timings( 2718 drvp->PIO_mode, 0, channel); 2719 } else { 2720 sidetim |= piix_setup_sidetim_timings( 2721 drvp->PIO_mode, 0, channel); 2722 idetim =PIIX_IDETIM_SET(idetim, 2723 PIIX_IDETIM_SITRE, channel); 2724 } 2725 } 2726 if (idedma_ctl != 0) { 2727 /* Add software bits in status register */ 2728 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2729 IDEDMA_CTL(channel), 2730 idedma_ctl); 2731 } 2732 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2733 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 2734 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 2735 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 2736 pciide_print_modes(cp); 2737 } 2738 2739 2740 /* setup ISP and RTC fields, based on mode */ 2741 u_int32_t 2742 piix_setup_idetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 2743 { 2744 2745 if (dma) 2746 return (PIIX_IDETIM_SET(0, 2747 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 2748 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 2749 channel)); 2750 else 2751 return (PIIX_IDETIM_SET(0, 2752 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 2753 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 2754 channel)); 2755 } 2756 2757 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 2758 u_int32_t 2759 piix_setup_idetim_drvs(struct 
ata_drive_datas *drvp)
2760 {
2761 u_int32_t ret = 0;
2762 struct channel_softc *chp = drvp->chnl_softc;
2763 u_int8_t channel = chp->channel;
2764 u_int8_t drive = drvp->drive;
2765
2766 /*
2767 * If the drive is using UDMA, the timing setups are independent,
2768 * so just check DMA and PIO here.
2769 */
2770 if (drvp->drive_flags & DRIVE_DMA) {
2771 /* if mode = DMA mode 0, use compatible timings */
2772 if ((drvp->drive_flags & DRIVE_DMA) &&
2773 drvp->DMA_mode == 0) {
2774 drvp->PIO_mode = 0;
2775 return (ret);
2776 }
2777 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2778 /*
2779 * If the PIO and DMA timings are the same, use fast timings
2780 * for PIO too; else use compat timings (PIO mode 0).
2781 */
2782 if ((piix_isp_pio[drvp->PIO_mode] !=
2783 piix_isp_dma[drvp->DMA_mode]) ||
2784 (piix_rtc_pio[drvp->PIO_mode] !=
2785 piix_rtc_dma[drvp->DMA_mode]))
2786 drvp->PIO_mode = 0;
2787 /* if PIO mode <= 2, use compat timings for PIO */
2788 if (drvp->PIO_mode <= 2) {
2789 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2790 channel);
2791 return (ret);
2792 }
2793 }
2794
2795 /*
2796 * Now set up PIO modes. If mode < 2, use compat timings.
2797 * Else enable fast timings. Enable IORDY and prefetch/post
2798 * if PIO mode >= 3.
2799 */
2800
2801 if (drvp->PIO_mode < 2)
2802 return (ret);
2803
2804 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2805 if (drvp->PIO_mode >= 3) {
2806 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2807 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2808 }
2809 return (ret);
2810 }
2811
2812 /* setup values in SIDETIM registers, based on mode */
2813 u_int32_t
2814 piix_setup_sidetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel)
2815 {
2816 if (dma)
2817 return (PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2818 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel));
2819 else
2820 return (PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2821 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel));
2822 }
2823
2824 void
2825 amd756_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
2826 {
2827 struct pciide_channel *cp;
2828 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2829 int channel;
2830 pcireg_t chanenable;
2831 bus_size_t cmdsize, ctlsize;
2832
2833 printf(": DMA");
2834 pciide_mapreg_dma(sc, pa);
2835 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2836 WDC_CAPABILITY_MODE;
2837 if (sc->sc_dma_ok) {
2838 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2839 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2840 sc->sc_wdcdev.irqack = pciide_irqack;
2841 }
2842 sc->sc_wdcdev.PIO_cap = 4;
2843 sc->sc_wdcdev.DMA_cap = 2;
2844 switch (sc->sc_pp->ide_product) {
2845 case PCI_PRODUCT_AMD_8111_IDE:
2846 sc->sc_wdcdev.UDMA_cap = 6;
2847 break;
2848 case PCI_PRODUCT_AMD_766_IDE:
2849 case PCI_PRODUCT_AMD_PBC768_IDE:
2850 sc->sc_wdcdev.UDMA_cap = 5;
2851 break;
2852 default:
2853 sc->sc_wdcdev.UDMA_cap = 4;
2854 break;
2855 }
2856 sc->sc_wdcdev.set_modes = amd756_setup_channel;
2857 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2858 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2859 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
2860
2861 pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2862
2863 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2864 cp = &sc->pciide_channels[channel];
2865 if (pciide_chansetup(sc, channel, interface) == 0)
2866 continue;
2867
2868 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
2869
printf("%s: %s ignored (disabled)\n", 2870 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2871 continue; 2872 } 2873 pciide_map_compat_intr(pa, cp, channel, interface); 2874 if (cp->hw_ok == 0) 2875 continue; 2876 2877 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2878 pciide_pci_intr); 2879 2880 if (pciide_chan_candisable(cp)) { 2881 chanenable &= ~AMD756_CHAN_EN(channel); 2882 } 2883 if (cp->hw_ok == 0) { 2884 pciide_unmap_compat_intr(pa, cp, channel, interface); 2885 continue; 2886 } 2887 2888 amd756_setup_channel(&cp->wdc_channel); 2889 } 2890 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 2891 chanenable); 2892 return; 2893 } 2894 2895 void 2896 amd756_setup_channel(struct channel_softc *chp) 2897 { 2898 u_int32_t udmatim_reg, datatim_reg; 2899 u_int8_t idedma_ctl; 2900 int mode, drive; 2901 struct ata_drive_datas *drvp; 2902 struct pciide_channel *cp = (struct pciide_channel *)chp; 2903 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2904 pcireg_t chanenable; 2905 #ifndef PCIIDE_AMD756_ENABLEDMA 2906 int product = sc->sc_pp->ide_product; 2907 int rev = sc->sc_rev; 2908 #endif 2909 2910 idedma_ctl = 0; 2911 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 2912 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 2913 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 2914 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 2915 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 2916 AMD756_CHANSTATUS_EN); 2917 2918 /* setup DMA if needed */ 2919 pciide_channel_dma_setup(cp); 2920 2921 for (drive = 0; drive < 2; drive++) { 2922 drvp = &chp->ch_drive[drive]; 2923 /* If no drive, skip */ 2924 if ((drvp->drive_flags & DRIVE) == 0) 2925 continue; 2926 /* add timing values, setup DMA if needed */ 2927 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2928 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2929 mode = drvp->PIO_mode; 2930 goto pio; 2931 } 2932 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2933 (drvp->drive_flags & DRIVE_UDMA)) { 2934 /* use Ultra/DMA */ 2935 drvp->drive_flags &= ~DRIVE_DMA; 2936 2937 /* Check cable */ 2938 if ((chanenable & AMD756_CABLE(chp->channel, 2939 drive)) == 0 && drvp->UDMA_mode > 2) { 2940 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 2941 "cable not detected\n", drvp->drive_name, 2942 sc->sc_wdcdev.sc_dev.dv_xname, 2943 chp->channel, drive), DEBUG_PROBE); 2944 drvp->UDMA_mode = 2; 2945 } 2946 2947 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 2948 AMD756_UDMA_EN_MTH(chp->channel, drive) | 2949 AMD756_UDMA_TIME(chp->channel, drive, 2950 amd756_udma_tim[drvp->UDMA_mode]); 2951 /* can use PIO timings, MW DMA unused */ 2952 mode = drvp->PIO_mode; 2953 } else { 2954 /* use Multiword DMA, but only if revision is OK */ 2955 drvp->drive_flags &= ~DRIVE_UDMA; 2956 #ifndef PCIIDE_AMD756_ENABLEDMA 2957 /* 2958 * The workaround doesn't seem to be necessary 2959 * with all drives, so it can be disabled by 2960 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 2961 * triggered. 
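 * AMD756_CHIPREV_DISABLEDMA() below identifies the affected
 * product/revision combinations and falls back to PIO for them.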
2962 */ 2963 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 2964 printf("%s:%d:%d: multi-word DMA disabled due " 2965 "to chip revision\n", 2966 sc->sc_wdcdev.sc_dev.dv_xname, 2967 chp->channel, drive); 2968 mode = drvp->PIO_mode; 2969 drvp->drive_flags &= ~DRIVE_DMA; 2970 goto pio; 2971 } 2972 #endif 2973 /* mode = min(pio, dma+2) */ 2974 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2975 mode = drvp->PIO_mode; 2976 else 2977 mode = drvp->DMA_mode + 2; 2978 } 2979 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2980 2981 pio: /* setup PIO mode */ 2982 if (mode <= 2) { 2983 drvp->DMA_mode = 0; 2984 drvp->PIO_mode = 0; 2985 mode = 0; 2986 } else { 2987 drvp->PIO_mode = mode; 2988 drvp->DMA_mode = mode - 2; 2989 } 2990 datatim_reg |= 2991 AMD756_DATATIM_PULSE(chp->channel, drive, 2992 amd756_pio_set[mode]) | 2993 AMD756_DATATIM_RECOV(chp->channel, drive, 2994 amd756_pio_rec[mode]); 2995 } 2996 if (idedma_ctl != 0) { 2997 /* Add software bits in status register */ 2998 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2999 IDEDMA_CTL(chp->channel), 3000 idedma_ctl); 3001 } 3002 pciide_print_modes(cp); 3003 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 3004 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 3005 } 3006 3007 void 3008 apollo_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3009 { 3010 struct pciide_channel *cp; 3011 pcireg_t interface; 3012 int channel; 3013 u_int32_t ideconf; 3014 bus_size_t cmdsize, ctlsize; 3015 pcitag_t tag; 3016 pcireg_t id, class; 3017 3018 /* 3019 * Fake interface since VT6410 is claimed to be a ``RAID'' device. 3020 */ 3021 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3022 interface = PCI_INTERFACE(pa->pa_class); 3023 } else { 3024 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3025 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3026 } 3027 3028 if ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT6410) || 3029 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_CX700_IDE) || 3030 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VX700_IDE)) { 3031 printf(": ATA133"); 3032 sc->sc_wdcdev.UDMA_cap = 6; 3033 } else { 3034 /* 3035 * Determine the DMA capabilities by looking at the 3036 * ISA bridge. 3037 */ 3038 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 3039 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3040 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3041 3042 /* 3043 * XXX On the VT8237, the ISA bridge is on a different 3044 * device. 
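 * When the lookup at function 0 doesn't find a bridge (and the IDE
 * function sits at device 15), device 17 is probed instead.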
3045 */ 3046 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE && 3047 pa->pa_device == 15) { 3048 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 17, 0); 3049 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3050 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3051 } 3052 3053 switch (PCI_PRODUCT(id)) { 3054 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 3055 if (PCI_REVISION(class) >= 0x02) { 3056 printf(": ATA33"); 3057 sc->sc_wdcdev.UDMA_cap = 2; 3058 } else { 3059 printf(": DMA"); 3060 sc->sc_wdcdev.UDMA_cap = 0; 3061 } 3062 break; 3063 case PCI_PRODUCT_VIATECH_VT82C596A: 3064 if (PCI_REVISION(class) >= 0x12) { 3065 printf(": ATA66"); 3066 sc->sc_wdcdev.UDMA_cap = 4; 3067 } else { 3068 printf(": ATA33"); 3069 sc->sc_wdcdev.UDMA_cap = 2; 3070 } 3071 break; 3072 3073 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 3074 if (PCI_REVISION(class) >= 0x40) { 3075 printf(": ATA100"); 3076 sc->sc_wdcdev.UDMA_cap = 5; 3077 } else { 3078 printf(": ATA66"); 3079 sc->sc_wdcdev.UDMA_cap = 4; 3080 } 3081 break; 3082 case PCI_PRODUCT_VIATECH_VT8231_ISA: 3083 case PCI_PRODUCT_VIATECH_VT8233_ISA: 3084 printf(": ATA100"); 3085 sc->sc_wdcdev.UDMA_cap = 5; 3086 break; 3087 case PCI_PRODUCT_VIATECH_VT8233A_ISA: 3088 case PCI_PRODUCT_VIATECH_VT8235_ISA: 3089 case PCI_PRODUCT_VIATECH_VT8237_ISA: 3090 printf(": ATA133"); 3091 sc->sc_wdcdev.UDMA_cap = 6; 3092 break; 3093 default: 3094 printf(": DMA"); 3095 sc->sc_wdcdev.UDMA_cap = 0; 3096 break; 3097 } 3098 } 3099 3100 pciide_mapreg_dma(sc, pa); 3101 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3102 WDC_CAPABILITY_MODE; 3103 if (sc->sc_dma_ok) { 3104 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3105 sc->sc_wdcdev.irqack = pciide_irqack; 3106 if (sc->sc_wdcdev.UDMA_cap > 0) 3107 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3108 } 3109 sc->sc_wdcdev.PIO_cap = 4; 3110 sc->sc_wdcdev.DMA_cap = 2; 3111 sc->sc_wdcdev.set_modes = apollo_setup_channel; 3112 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3113 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3114 3115 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3116 3117 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 3118 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3119 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 3120 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 3121 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3122 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 3123 DEBUG_PROBE); 3124 3125 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3126 cp = &sc->pciide_channels[channel]; 3127 if (pciide_chansetup(sc, channel, interface) == 0) 3128 continue; 3129 3130 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 3131 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 3132 printf("%s: %s ignored (disabled)\n", 3133 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3134 continue; 3135 } 3136 pciide_map_compat_intr(pa, cp, channel, interface); 3137 if (cp->hw_ok == 0) 3138 continue; 3139 3140 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3141 pciide_pci_intr); 3142 if (cp->hw_ok == 0) { 3143 goto next; 3144 } 3145 if (pciide_chan_candisable(cp)) { 3146 ideconf &= ~APO_IDECONF_EN(channel); 3147 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 3148 ideconf); 3149 } 3150 3151 if (cp->hw_ok == 0) 3152 goto next; 3153 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 3154 next: 3155 if (cp->hw_ok == 0) 3156 pciide_unmap_compat_intr(pa, cp, channel, interface); 3157 } 3158 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, 
APO_UDMA=0x%x\n", 3159 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3160 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 3161 } 3162 3163 void 3164 apollo_setup_channel(struct channel_softc *chp) 3165 { 3166 u_int32_t udmatim_reg, datatim_reg; 3167 u_int8_t idedma_ctl; 3168 int mode, drive; 3169 struct ata_drive_datas *drvp; 3170 struct pciide_channel *cp = (struct pciide_channel *)chp; 3171 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3172 3173 idedma_ctl = 0; 3174 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 3175 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 3176 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 3177 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 3178 3179 /* setup DMA if needed */ 3180 pciide_channel_dma_setup(cp); 3181 3182 /* 3183 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 3184 * downgrade to Ultra/33 if needed 3185 */ 3186 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3187 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 3188 /* both drives UDMA */ 3189 if (chp->ch_drive[0].UDMA_mode > 2 && 3190 chp->ch_drive[1].UDMA_mode <= 2) { 3191 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 3192 chp->ch_drive[0].UDMA_mode = 2; 3193 } else if (chp->ch_drive[1].UDMA_mode > 2 && 3194 chp->ch_drive[0].UDMA_mode <= 2) { 3195 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 3196 chp->ch_drive[1].UDMA_mode = 2; 3197 } 3198 } 3199 3200 for (drive = 0; drive < 2; drive++) { 3201 drvp = &chp->ch_drive[drive]; 3202 /* If no drive, skip */ 3203 if ((drvp->drive_flags & DRIVE) == 0) 3204 continue; 3205 /* add timing values, setup DMA if needed */ 3206 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3207 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3208 mode = drvp->PIO_mode; 3209 goto pio; 3210 } 3211 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3212 (drvp->drive_flags & DRIVE_UDMA)) { 3213 /* use Ultra/DMA */ 3214 drvp->drive_flags &= ~DRIVE_DMA; 3215 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 3216 APO_UDMA_EN_MTH(chp->channel, drive); 3217 if (sc->sc_wdcdev.UDMA_cap == 6) { 3218 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3219 drive, apollo_udma133_tim[drvp->UDMA_mode]); 3220 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 3221 /* 686b */ 3222 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3223 drive, apollo_udma100_tim[drvp->UDMA_mode]); 3224 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 3225 /* 596b or 686a */ 3226 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 3227 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3228 drive, apollo_udma66_tim[drvp->UDMA_mode]); 3229 } else { 3230 /* 596a or 586b */ 3231 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3232 drive, apollo_udma33_tim[drvp->UDMA_mode]); 3233 } 3234 /* can use PIO timings, MW DMA unused */ 3235 mode = drvp->PIO_mode; 3236 } else { 3237 /* use Multiword DMA */ 3238 drvp->drive_flags &= ~DRIVE_UDMA; 3239 /* mode = min(pio, dma+2) */ 3240 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3241 mode = drvp->PIO_mode; 3242 else 3243 mode = drvp->DMA_mode + 2; 3244 } 3245 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3246 3247 pio: /* setup PIO mode */ 3248 if (mode <= 2) { 3249 drvp->DMA_mode = 0; 3250 drvp->PIO_mode = 0; 3251 mode = 0; 3252 } else { 3253 drvp->PIO_mode = mode; 3254 drvp->DMA_mode = mode - 2; 3255 } 3256 datatim_reg |= 3257 APO_DATATIM_PULSE(chp->channel, drive, 3258 apollo_pio_set[mode]) | 3259 APO_DATATIM_RECOV(chp->channel, drive, 3260 apollo_pio_rec[mode]); 3261 } 3262 if (idedma_ctl != 0) { 3263 /* Add software bits in status register */ 3264 
bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3265 IDEDMA_CTL(chp->channel),
3266 idedma_ctl);
3267 }
3268 pciide_print_modes(cp);
3269 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
3270 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
3271 }
3272
3273 void
3274 cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc,
3275 int channel)
3276 {
3277 struct pciide_channel *cp = &sc->pciide_channels[channel];
3278 bus_size_t cmdsize, ctlsize;
3279 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
3280 pcireg_t interface;
3281 int one_channel;
3282
3283 /*
3284 * The 0648/0649 can be told to identify as a RAID controller.
3285 * In this case, we have to fake the interface
3286 */
3287 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
3288 interface = PCIIDE_INTERFACE_SETTABLE(0) |
3289 PCIIDE_INTERFACE_SETTABLE(1);
3290 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
3291 CMD_CONF_DSA1)
3292 interface |= PCIIDE_INTERFACE_PCI(0) |
3293 PCIIDE_INTERFACE_PCI(1);
3294 } else {
3295 interface = PCI_INTERFACE(pa->pa_class);
3296 }
3297
3298 sc->wdc_chanarray[channel] = &cp->wdc_channel;
3299 cp->name = PCIIDE_CHANNEL_NAME(channel);
3300 cp->wdc_channel.channel = channel;
3301 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3302
3303 /*
3304 * Older CMD64X chips don't have independent channels
3305 */
3306 switch (sc->sc_pp->ide_product) {
3307 case PCI_PRODUCT_CMDTECH_649:
3308 one_channel = 0;
3309 break;
3310 default:
3311 one_channel = 1;
3312 break;
3313 }
3314
3315 if (channel > 0 && one_channel) {
3316 cp->wdc_channel.ch_queue =
3317 sc->pciide_channels[0].wdc_channel.ch_queue;
3318 } else {
3319 cp->wdc_channel.ch_queue =
3320 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3321 }
3322 if (cp->wdc_channel.ch_queue == NULL) {
3323 printf(
3324 "%s: %s cannot allocate memory for command queue",
3325 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3326 return;
3327 }
3328
3329 /*
3330 * with a CMD PCI64x, if we get here, the first channel is enabled:
3331 * there's no way to disable the first channel without disabling
3332 * the whole device
3333 */
3334 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
3335 printf("%s: %s ignored (disabled)\n",
3336 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3337 return;
3338 }
3339 cp->hw_ok = 1;
3340 pciide_map_compat_intr(pa, cp, channel, interface);
3341 if (cp->hw_ok == 0)
3342 return;
3343 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
3344 if (cp->hw_ok == 0) {
3345 pciide_unmap_compat_intr(pa, cp, channel, interface);
3346 return;
3347 }
3348 if (pciide_chan_candisable(cp)) {
3349 if (channel == 1) {
3350 ctrl &= ~CMD_CTRL_2PORT;
3351 pciide_pci_write(pa->pa_pc, pa->pa_tag,
3352 CMD_CTRL, ctrl);
3353 pciide_unmap_compat_intr(pa, cp, channel, interface);
3354 }
3355 }
3356 }
3357
3358 int
3359 cmd_pci_intr(void *arg)
3360 {
3361 struct pciide_softc *sc = arg;
3362 struct pciide_channel *cp;
3363 struct channel_softc *wdc_cp;
3364 int i, rv, crv;
3365 u_int32_t priirq, secirq;
3366
3367 rv = 0;
3368 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
3369 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
3370 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3371 cp = &sc->pciide_channels[i];
3372 wdc_cp = &cp->wdc_channel;
3373 /* If a compat channel, skip.
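 * Compat channels get their interrupts via pciide_compat_intr(), not here.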
*/ 3374 if (cp->compat) 3375 continue; 3376 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 3377 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 3378 crv = wdcintr(wdc_cp); 3379 if (crv == 0) { 3380 #if 0 3381 printf("%s:%d: bogus intr\n", 3382 sc->sc_wdcdev.sc_dev.dv_xname, i); 3383 #endif 3384 } else 3385 rv = 1; 3386 } 3387 } 3388 return (rv); 3389 } 3390 3391 void 3392 cmd_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3393 { 3394 int channel; 3395 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3396 3397 printf(": no DMA"); 3398 sc->sc_dma_ok = 0; 3399 3400 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3401 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3402 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3403 3404 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3405 3406 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3407 cmd_channel_map(pa, sc, channel); 3408 } 3409 } 3410 3411 void 3412 cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3413 { 3414 struct pciide_channel *cp; 3415 int channel; 3416 int rev = sc->sc_rev; 3417 pcireg_t interface; 3418 3419 /* 3420 * The 0648/0649 can be told to identify as a RAID controller. 3421 * In this case, we have to fake interface 3422 */ 3423 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3424 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3425 PCIIDE_INTERFACE_SETTABLE(1); 3426 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3427 CMD_CONF_DSA1) 3428 interface |= PCIIDE_INTERFACE_PCI(0) | 3429 PCIIDE_INTERFACE_PCI(1); 3430 } else { 3431 interface = PCI_INTERFACE(pa->pa_class); 3432 } 3433 3434 printf(": DMA"); 3435 pciide_mapreg_dma(sc, pa); 3436 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3437 WDC_CAPABILITY_MODE; 3438 if (sc->sc_dma_ok) { 3439 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3440 switch (sc->sc_pp->ide_product) { 3441 case PCI_PRODUCT_CMDTECH_649: 3442 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3443 sc->sc_wdcdev.UDMA_cap = 5; 3444 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3445 break; 3446 case PCI_PRODUCT_CMDTECH_648: 3447 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3448 sc->sc_wdcdev.UDMA_cap = 4; 3449 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3450 break; 3451 case PCI_PRODUCT_CMDTECH_646: 3452 if (rev >= CMD0646U2_REV) { 3453 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3454 sc->sc_wdcdev.UDMA_cap = 2; 3455 } else if (rev >= CMD0646U_REV) { 3456 /* 3457 * Linux's driver claims that the 646U is broken 3458 * with UDMA. 
Only enable it if we know what we're 3459 * doing 3460 */ 3461 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 3462 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3463 sc->sc_wdcdev.UDMA_cap = 2; 3464 #endif 3465 /* explicitly disable UDMA */ 3466 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3467 CMD_UDMATIM(0), 0); 3468 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3469 CMD_UDMATIM(1), 0); 3470 } 3471 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3472 break; 3473 default: 3474 sc->sc_wdcdev.irqack = pciide_irqack; 3475 } 3476 } 3477 3478 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3479 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3480 sc->sc_wdcdev.PIO_cap = 4; 3481 sc->sc_wdcdev.DMA_cap = 2; 3482 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 3483 3484 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3485 3486 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 3487 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3488 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3489 DEBUG_PROBE); 3490 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3491 cp = &sc->pciide_channels[channel]; 3492 cmd_channel_map(pa, sc, channel); 3493 if (cp->hw_ok == 0) 3494 continue; 3495 cmd0643_9_setup_channel(&cp->wdc_channel); 3496 } 3497 /* 3498 * note - this also makes sure we clear the irq disable and reset 3499 * bits 3500 */ 3501 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 3502 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 3503 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3504 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3505 DEBUG_PROBE); 3506 } 3507 3508 void 3509 cmd0643_9_setup_channel(struct channel_softc *chp) 3510 { 3511 struct ata_drive_datas *drvp; 3512 u_int8_t tim; 3513 u_int32_t idedma_ctl, udma_reg; 3514 int drive; 3515 struct pciide_channel *cp = (struct pciide_channel *)chp; 3516 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3517 3518 idedma_ctl = 0; 3519 /* setup DMA if needed */ 3520 pciide_channel_dma_setup(cp); 3521 3522 for (drive = 0; drive < 2; drive++) { 3523 drvp = &chp->ch_drive[drive]; 3524 /* If no drive, skip */ 3525 if ((drvp->drive_flags & DRIVE) == 0) 3526 continue; 3527 /* add timing values, setup DMA if needed */ 3528 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 3529 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 3530 if (drvp->drive_flags & DRIVE_UDMA) { 3531 /* UltraDMA on a 646U2, 0648 or 0649 */ 3532 drvp->drive_flags &= ~DRIVE_DMA; 3533 udma_reg = pciide_pci_read(sc->sc_pc, 3534 sc->sc_tag, CMD_UDMATIM(chp->channel)); 3535 if (drvp->UDMA_mode > 2 && 3536 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3537 CMD_BICSR) & 3538 CMD_BICSR_80(chp->channel)) == 0) { 3539 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 3540 "80-wire cable not detected\n", 3541 drvp->drive_name, 3542 sc->sc_wdcdev.sc_dev.dv_xname, 3543 chp->channel, drive), DEBUG_PROBE); 3544 drvp->UDMA_mode = 2; 3545 } 3546 if (drvp->UDMA_mode > 2) 3547 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 3548 else if (sc->sc_wdcdev.UDMA_cap > 2) 3549 udma_reg |= CMD_UDMATIM_UDMA33(drive); 3550 udma_reg |= CMD_UDMATIM_UDMA(drive); 3551 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 3552 CMD_UDMATIM_TIM_OFF(drive)); 3553 udma_reg |= 3554 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 3555 CMD_UDMATIM_TIM_OFF(drive)); 3556 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3557 CMD_UDMATIM(chp->channel), udma_reg); 3558 } else { 3559 /* 3560 * use Multiword DMA. 
3561 * Timings will be used for both PIO and DMA, 3562 * so adjust DMA mode if needed 3563 * if we have a 0646U2/8/9, turn off UDMA 3564 */ 3565 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 3566 udma_reg = pciide_pci_read(sc->sc_pc, 3567 sc->sc_tag, 3568 CMD_UDMATIM(chp->channel)); 3569 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 3570 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3571 CMD_UDMATIM(chp->channel), 3572 udma_reg); 3573 } 3574 if (drvp->PIO_mode >= 3 && 3575 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3576 drvp->DMA_mode = drvp->PIO_mode - 2; 3577 } 3578 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 3579 } 3580 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3581 } 3582 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3583 CMD_DATA_TIM(chp->channel, drive), tim); 3584 } 3585 if (idedma_ctl != 0) { 3586 /* Add software bits in status register */ 3587 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3588 IDEDMA_CTL(chp->channel), 3589 idedma_ctl); 3590 } 3591 pciide_print_modes(cp); 3592 #ifdef __sparc64__ 3593 /* 3594 * The Ultra 5 has a tendency to hang during reboot. This is due 3595 * to the PCI0646U asserting a PCI interrupt line when the chip 3596 * registers claim that it is not. Performing a reset at this 3597 * point appears to eliminate the symptoms. It is likely the 3598 * real cause is still lurking somewhere in the code. 3599 */ 3600 wdcreset(chp, SILENT); 3601 #endif /* __sparc64__ */ 3602 } 3603 3604 void 3605 cmd646_9_irqack(struct channel_softc *chp) 3606 { 3607 u_int32_t priirq, secirq; 3608 struct pciide_channel *cp = (struct pciide_channel *)chp; 3609 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3610 3611 if (chp->channel == 0) { 3612 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3613 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 3614 } else { 3615 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3616 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 3617 } 3618 pciide_irqack(chp); 3619 } 3620 3621 void 3622 cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3623 { 3624 struct pciide_channel *cp; 3625 int channel; 3626 3627 printf("\n%s: bus-master DMA support present", 3628 sc->sc_wdcdev.sc_dev.dv_xname); 3629 pciide_mapreg_dma(sc, pa); 3630 printf("\n"); 3631 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3632 WDC_CAPABILITY_MODE; 3633 if (sc->sc_dma_ok) { 3634 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3635 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3636 sc->sc_wdcdev.UDMA_cap = 6; 3637 sc->sc_wdcdev.irqack = pciide_irqack; 3638 } 3639 3640 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3641 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3642 sc->sc_wdcdev.PIO_cap = 4; 3643 sc->sc_wdcdev.DMA_cap = 2; 3644 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 3645 3646 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 3647 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 3648 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 3649 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 3650 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3651 cp = &sc->pciide_channels[channel]; 3652 cmd680_channel_map(pa, sc, channel); 3653 if (cp->hw_ok == 0) 3654 continue; 3655 cmd680_setup_channel(&cp->wdc_channel); 3656 } 3657 } 3658 3659 void 3660 cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3661 int channel) 3662 { 3663 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3664 bus_size_t cmdsize, ctlsize; 3665 int 
interface, i, reg; 3666 static const u_int8_t init_val[] = 3667 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 3668 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 3669 3670 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3671 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3672 PCIIDE_INTERFACE_SETTABLE(1); 3673 interface |= PCIIDE_INTERFACE_PCI(0) | 3674 PCIIDE_INTERFACE_PCI(1); 3675 } else { 3676 interface = PCI_INTERFACE(pa->pa_class); 3677 } 3678 3679 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3680 cp->name = PCIIDE_CHANNEL_NAME(channel); 3681 cp->wdc_channel.channel = channel; 3682 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3683 3684 cp->wdc_channel.ch_queue = 3685 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 3686 if (cp->wdc_channel.ch_queue == NULL) { 3687 printf("%s %s: " 3688 "can't allocate memory for command queue", 3689 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3690 return; 3691 } 3692 3693 /* XXX */ 3694 reg = 0xa2 + channel * 16; 3695 for (i = 0; i < sizeof(init_val); i++) 3696 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 3697 3698 printf("%s: %s %s to %s mode\n", 3699 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 3700 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 3701 "configured" : "wired", 3702 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 3703 "native-PCI" : "compatibility"); 3704 3705 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 3706 if (cp->hw_ok == 0) 3707 return; 3708 pciide_map_compat_intr(pa, cp, channel, interface); 3709 } 3710 3711 void 3712 cmd680_setup_channel(struct channel_softc *chp) 3713 { 3714 struct ata_drive_datas *drvp; 3715 u_int8_t mode, off, scsc; 3716 u_int16_t val; 3717 u_int32_t idedma_ctl; 3718 int drive; 3719 struct pciide_channel *cp = (struct pciide_channel *)chp; 3720 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3721 pci_chipset_tag_t pc = sc->sc_pc; 3722 pcitag_t pa = sc->sc_tag; 3723 static const u_int8_t udma2_tbl[] = 3724 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 3725 static const u_int8_t udma_tbl[] = 3726 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 3727 static const u_int16_t dma_tbl[] = 3728 { 0x2208, 0x10c2, 0x10c1 }; 3729 static const u_int16_t pio_tbl[] = 3730 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 3731 3732 idedma_ctl = 0; 3733 pciide_channel_dma_setup(cp); 3734 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 3735 3736 for (drive = 0; drive < 2; drive++) { 3737 drvp = &chp->ch_drive[drive]; 3738 /* If no drive, skip */ 3739 if ((drvp->drive_flags & DRIVE) == 0) 3740 continue; 3741 mode &= ~(0x03 << (drive * 4)); 3742 if (drvp->drive_flags & DRIVE_UDMA) { 3743 drvp->drive_flags &= ~DRIVE_DMA; 3744 off = 0xa0 + chp->channel * 16; 3745 if (drvp->UDMA_mode > 2 && 3746 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 3747 drvp->UDMA_mode = 2; 3748 scsc = pciide_pci_read(pc, pa, 0x8a); 3749 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 3750 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 3751 scsc = pciide_pci_read(pc, pa, 0x8a); 3752 if ((scsc & 0x30) == 0) 3753 drvp->UDMA_mode = 5; 3754 } 3755 mode |= 0x03 << (drive * 4); 3756 off = 0xac + chp->channel * 16 + drive * 2; 3757 val = pciide_pci_read(pc, pa, off) & ~0x3f; 3758 if (scsc & 0x30) 3759 val |= udma2_tbl[drvp->UDMA_mode]; 3760 else 3761 val |= udma_tbl[drvp->UDMA_mode]; 3762 pciide_pci_write(pc, pa, off, val); 3763 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3764 } else if (drvp->drive_flags & DRIVE_DMA) { 3765 mode |= 0x02 << (drive * 4); 3766 off = 0xa8 + chp->channel * 16 + 
drive * 2; 3767 val = dma_tbl[drvp->DMA_mode]; 3768 pciide_pci_write(pc, pa, off, val & 0xff); 3769 pciide_pci_write(pc, pa, off + 1, val >> 8); 3770 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3771 } else { 3772 mode |= 0x01 << (drive * 4); 3773 off = 0xa4 + chp->channel * 16 + drive * 2; 3774 val = pio_tbl[drvp->PIO_mode]; 3775 pciide_pci_write(pc, pa, off, val & 0xff); 3776 pciide_pci_write(pc, pa, off + 1, val >> 8); 3777 } 3778 } 3779 3780 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode); 3781 if (idedma_ctl != 0) { 3782 /* Add software bits in status register */ 3783 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3784 IDEDMA_CTL(chp->channel), 3785 idedma_ctl); 3786 } 3787 pciide_print_modes(cp); 3788 } 3789 3790 /* 3791 * When the Silicon Image 3112 retries a PCI memory read command, 3792 * it may retry it as a memory read multiple command under some 3793 * circumstances. This can totally confuse some PCI controllers, 3794 * so ensure that it will never do this by making sure that the 3795 * Read Threshold (FIFO Read Request Control) field of the FIFO 3796 * Valid Byte Count and Control registers for both channels (BA5 3797 * offset 0x40 and 0x44) are set to be at least as large as the 3798 * cacheline size register. 3799 */ 3800 void 3801 sii_fixup_cacheline(struct pciide_softc *sc, struct pci_attach_args *pa) 3802 { 3803 pcireg_t cls, reg40, reg44; 3804 3805 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 3806 cls = (cls >> PCI_CACHELINE_SHIFT) & PCI_CACHELINE_MASK; 3807 cls *= 4; 3808 if (cls > 224) { 3809 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 3810 cls &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT); 3811 cls |= ((224/4) << PCI_CACHELINE_SHIFT); 3812 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, cls); 3813 cls = 224; 3814 } 3815 if (cls < 32) 3816 cls = 32; 3817 cls = (cls + 31) / 32; 3818 reg40 = ba5_read_4(sc, 0x40); 3819 reg44 = ba5_read_4(sc, 0x44); 3820 if ((reg40 & 0x7) < cls) 3821 ba5_write_4(sc, 0x40, (reg40 & ~0x07) | cls); 3822 if ((reg44 & 0x7) < cls) 3823 ba5_write_4(sc, 0x44, (reg44 & ~0x07) | cls); 3824 } 3825 3826 void 3827 sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3828 { 3829 struct pciide_channel *cp; 3830 bus_size_t cmdsize, ctlsize; 3831 pcireg_t interface, scs_cmd, cfgctl; 3832 int channel; 3833 struct pciide_satalink *sl = sc->sc_cookie; 3834 3835 /* Allocate memory for private data */ 3836 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 3837 sl = sc->sc_cookie; 3838 3839 #define SII3112_RESET_BITS \ 3840 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 3841 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 3842 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET) 3843 3844 /* 3845 * Reset everything and then unblock all of the interrupts.
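 * SII3112_RESET_BITS (defined above) pulses the PBM, arbiter, FIFO and IDE channel reset bits; the second SCS_CMD write below releases them again, preserving only the BA5 enable bit.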
3846 */ 3847 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 3848 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 3849 scs_cmd | SII3112_RESET_BITS); 3850 delay(50 * 1000); 3851 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 3852 scs_cmd & SCS_CMD_BA5_EN); 3853 delay(50 * 1000); 3854 3855 if (scs_cmd & SCS_CMD_BA5_EN) { 3856 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 3857 PCI_MAPREG_TYPE_MEM | 3858 PCI_MAPREG_MEM_TYPE_32BIT, 0, 3859 &sl->ba5_st, &sl->ba5_sh, 3860 NULL, NULL, 0) != 0) 3861 printf(": unable to map BA5 register space\n"); 3862 else 3863 sl->ba5_en = 1; 3864 } else { 3865 cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 3866 SII3112_PCI_CFGCTL); 3867 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL, 3868 cfgctl | CFGCTL_BA5INDEN); 3869 } 3870 3871 printf(": DMA"); 3872 pciide_mapreg_dma(sc, pa); 3873 printf("\n"); 3874 3875 /* 3876 * Rev. <= 0x01 of the 3112 have a bug that can cause data 3877 * corruption if DMA transfers cross an 8K boundary. This is 3878 * apparently hard to tickle, but we'll go ahead and play it 3879 * safe. 3880 */ 3881 if (sc->sc_rev <= 0x01) { 3882 sc->sc_dma_maxsegsz = 8192; 3883 sc->sc_dma_boundary = 8192; 3884 } 3885 3886 sii_fixup_cacheline(sc, pa); 3887 3888 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 3889 sc->sc_wdcdev.PIO_cap = 4; 3890 if (sc->sc_dma_ok) { 3891 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3892 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3893 sc->sc_wdcdev.irqack = pciide_irqack; 3894 sc->sc_wdcdev.DMA_cap = 2; 3895 sc->sc_wdcdev.UDMA_cap = 6; 3896 } 3897 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 3898 3899 /* We can use SControl and SStatus to probe for drives. */ 3900 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 3901 3902 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3903 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3904 3905 /* 3906 * The 3112 either identifies itself as a RAID storage device 3907 * or a Misc storage device. Fake up the interface bits for 3908 * what our driver expects. 
3909 */ 3910 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3911 interface = PCI_INTERFACE(pa->pa_class); 3912 } else { 3913 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3914 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3915 } 3916 3917 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3918 cp = &sc->pciide_channels[channel]; 3919 if (pciide_chansetup(sc, channel, interface) == 0) 3920 continue; 3921 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3922 pciide_pci_intr); 3923 if (cp->hw_ok == 0) 3924 continue; 3925 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 3926 } 3927 } 3928 3929 void 3930 sii3112_setup_channel(struct channel_softc *chp) 3931 { 3932 struct ata_drive_datas *drvp; 3933 int drive; 3934 u_int32_t idedma_ctl, dtm; 3935 struct pciide_channel *cp = (struct pciide_channel *)chp; 3936 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3937 3938 /* setup DMA if needed */ 3939 pciide_channel_dma_setup(cp); 3940 3941 idedma_ctl = 0; 3942 dtm = 0; 3943 3944 for (drive = 0; drive < 2; drive++) { 3945 drvp = &chp->ch_drive[drive]; 3946 /* If no drive, skip */ 3947 if ((drvp->drive_flags & DRIVE) == 0) 3948 continue; 3949 if (drvp->drive_flags & DRIVE_UDMA) { 3950 /* use Ultra/DMA */ 3951 drvp->drive_flags &= ~DRIVE_DMA; 3952 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3953 dtm |= DTM_IDEx_DMA; 3954 } else if (drvp->drive_flags & DRIVE_DMA) { 3955 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3956 dtm |= DTM_IDEx_DMA; 3957 } else { 3958 dtm |= DTM_IDEx_PIO; 3959 } 3960 } 3961 3962 /* 3963 * Nothing to do to setup modes; it is meaningless in S-ATA 3964 * (but many S-ATA drives still want to get the SET_FEATURE 3965 * command). 3966 */ 3967 if (idedma_ctl != 0) { 3968 /* Add software bits in status register */ 3969 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 3970 } 3971 BA5_WRITE_4(sc, chp->channel, ba5_IDE_DTM, dtm); 3972 pciide_print_modes(cp); 3973 } 3974 3975 void 3976 sii3112_drv_probe(struct channel_softc *chp) 3977 { 3978 struct pciide_channel *cp = (struct pciide_channel *)chp; 3979 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3980 uint32_t scontrol, sstatus; 3981 uint8_t scnt, sn, cl, ch; 3982 int i, s; 3983 3984 /* XXX This should be done by other code. */ 3985 for (i = 0; i < 2; i++) { 3986 chp->ch_drive[i].chnl_softc = chp; 3987 chp->ch_drive[i].drive = i; 3988 } 3989 3990 /* 3991 * The 3112 is a 2-port part, and only has one drive per channel 3992 * (each port emulates a master drive). 3993 * 3994 * The 3114 is similar, but has 4 channels. 3995 */ 3996 3997 /* 3998 * Request communication initialization sequence, any speed. 3999 * Performing this is the equivalent of an ATA Reset. 4000 */ 4001 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 4002 4003 /* 4004 * XXX We don't yet support SATA power management; disable all 4005 * power management state transitions. 4006 */ 4007 scontrol |= SControl_IPM_NONE; 4008 4009 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4010 delay(50 * 1000); 4011 scontrol &= ~SControl_DET_INIT; 4012 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4013 delay(50 * 1000); 4014 4015 sstatus = BA5_READ_4(sc, chp->channel, ba5_SStatus); 4016 #if 0 4017 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 4018 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 4019 BA5_READ_4(sc, chp->channel, ba5_SControl)); 4020 #endif 4021 switch (sstatus & SStatus_DET_mask) { 4022 case SStatus_DET_NODEV: 4023 /* No device; be silent. 
*/ 4024 break; 4025 4026 case SStatus_DET_DEV_NE: 4027 printf("%s: port %d: device connected, but " 4028 "communication not established\n", 4029 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4030 break; 4031 4032 case SStatus_DET_OFFLINE: 4033 printf("%s: port %d: PHY offline\n", 4034 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4035 break; 4036 4037 case SStatus_DET_DEV: 4038 /* 4039 * XXX ATAPI detection doesn't currently work. Don't 4040 * XXX know why. But, it's not like the standard method 4041 * XXX can detect an ATAPI device connected via a SATA/PATA 4042 * XXX bridge, so at least this is no worse. --thorpej 4043 */ 4044 if (chp->_vtbl != NULL) 4045 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 4046 else 4047 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 4048 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 4049 delay(10); /* 400ns delay */ 4050 /* Save register contents. */ 4051 if (chp->_vtbl != NULL) { 4052 scnt = CHP_READ_REG(chp, wdr_seccnt); 4053 sn = CHP_READ_REG(chp, wdr_sector); 4054 cl = CHP_READ_REG(chp, wdr_cyl_lo); 4055 ch = CHP_READ_REG(chp, wdr_cyl_hi); 4056 } else { 4057 scnt = bus_space_read_1(chp->cmd_iot, 4058 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 4059 sn = bus_space_read_1(chp->cmd_iot, 4060 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 4061 cl = bus_space_read_1(chp->cmd_iot, 4062 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 4063 ch = bus_space_read_1(chp->cmd_iot, 4064 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 4065 } 4066 #if 0 4067 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 4068 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 4069 scnt, sn, cl, ch); 4070 #endif 4071 /* 4072 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 4073 * cases we get wrong values here, so ignore it. 4074 */ 4075 s = splbio(); 4076 if (cl == 0x14 && ch == 0xeb) 4077 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 4078 else 4079 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 4080 splx(s); 4081 4082 printf("%s: port %d: device present", 4083 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4084 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 4085 case 1: 4086 printf(", speed: 1.5Gb/s"); 4087 break; 4088 case 2: 4089 printf(", speed: 3.0Gb/s"); 4090 break; 4091 } 4092 printf("\n"); 4093 break; 4094 4095 default: 4096 printf("%s: port %d: unknown SStatus: 0x%08x\n", 4097 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 4098 } 4099 } 4100 4101 void 4102 sii3114_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4103 { 4104 struct pciide_channel *cp; 4105 pcireg_t scs_cmd; 4106 pci_intr_handle_t intrhandle; 4107 const char *intrstr; 4108 int channel; 4109 struct pciide_satalink *sl = sc->sc_cookie; 4110 4111 /* Allocate memory for private data */ 4112 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 4113 sl = sc->sc_cookie; 4114 4115 #define SII3114_RESET_BITS \ 4116 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4117 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4118 SCS_CMD_FF3_RESET | SCS_CMD_FF2_RESET | \ 4119 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET | \ 4120 SCS_CMD_IDE3_RESET | SCS_CMD_IDE2_RESET) 4121 4122 /* 4123 * Reset everything and then unblock all of the interrupts. 
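 * SII3114_RESET_BITS additionally covers the FIFOs and IDE channels 2 and 3; the follow-up SCS_CMD write releases the resets, preserving only SCS_CMD_M66EN.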
4124 */ 4125 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4126 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4127 scs_cmd | SII3114_RESET_BITS); 4128 delay(50 * 1000); 4129 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4130 scs_cmd & SCS_CMD_M66EN); 4131 delay(50 * 1000); 4132 4133 /* 4134 * On the 3114, the BA5 register space is always enabled. In 4135 * order to use the 3114 in any sane way, we must use this BA5 4136 * register space, and so we consider it an error if we cannot 4137 * map it. 4138 * 4139 * As a consequence of using BA5, our register mapping is different 4140 * from a normal PCI IDE controller's, and so we are unable to use 4141 * most of the common PCI IDE register mapping functions. 4142 */ 4143 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4144 PCI_MAPREG_TYPE_MEM | 4145 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4146 &sl->ba5_st, &sl->ba5_sh, 4147 NULL, NULL, 0) != 0) { 4148 printf(": unable to map BA5 register space\n"); 4149 return; 4150 } 4151 sl->ba5_en = 1; 4152 4153 /* 4154 * Set the Interrupt Steering bit in the IDEDMA_CMD register of 4155 * channel 2. This is required at all times for proper operation 4156 * when using the BA5 register space (otherwise interrupts from 4157 * all 4 channels won't work). 4158 */ 4159 BA5_WRITE_4(sc, 2, ba5_IDEDMA_CMD, IDEDMA_CMD_INT_STEER); 4160 4161 printf(": DMA"); 4162 sii3114_mapreg_dma(sc, pa); 4163 printf("\n"); 4164 4165 sii_fixup_cacheline(sc, pa); 4166 4167 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4168 sc->sc_wdcdev.PIO_cap = 4; 4169 if (sc->sc_dma_ok) { 4170 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4171 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4172 sc->sc_wdcdev.irqack = pciide_irqack; 4173 sc->sc_wdcdev.DMA_cap = 2; 4174 sc->sc_wdcdev.UDMA_cap = 6; 4175 } 4176 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4177 4178 /* We can use SControl and SStatus to probe for drives. */ 4179 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4180 4181 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4182 sc->sc_wdcdev.nchannels = 4; 4183 4184 /* Map and establish the interrupt handler. */ 4185 if (pci_intr_map(pa, &intrhandle) != 0) { 4186 printf("%s: couldn't map native-PCI interrupt\n", 4187 sc->sc_wdcdev.sc_dev.dv_xname); 4188 return; 4189 } 4190 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 4191 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 4192 /* XXX */ 4193 pciide_pci_intr, sc, 4194 sc->sc_wdcdev.sc_dev.dv_xname); 4195 if (sc->sc_pci_ih != NULL) { 4196 printf("%s: using %s for native-PCI interrupt\n", 4197 sc->sc_wdcdev.sc_dev.dv_xname, 4198 intrstr ? 
intrstr : "unknown interrupt"); 4199 } else { 4200 printf("%s: couldn't establish native-PCI interrupt", 4201 sc->sc_wdcdev.sc_dev.dv_xname); 4202 if (intrstr != NULL) 4203 printf(" at %s", intrstr); 4204 printf("\n"); 4205 return; 4206 } 4207 4208 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4209 cp = &sc->pciide_channels[channel]; 4210 if (sii3114_chansetup(sc, channel) == 0) 4211 continue; 4212 sii3114_mapchan(cp); 4213 if (cp->hw_ok == 0) 4214 continue; 4215 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4216 } 4217 } 4218 4219 void 4220 sii3114_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 4221 { 4222 int chan, reg; 4223 bus_size_t size; 4224 struct pciide_satalink *sl = sc->sc_cookie; 4225 4226 sc->sc_wdcdev.dma_arg = sc; 4227 sc->sc_wdcdev.dma_init = pciide_dma_init; 4228 sc->sc_wdcdev.dma_start = pciide_dma_start; 4229 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 4230 4231 /* 4232 * Slice off a subregion of BA5 for each of the channel's DMA 4233 * registers. 4234 */ 4235 4236 sc->sc_dma_iot = sl->ba5_st; 4237 for (chan = 0; chan < 4; chan++) { 4238 for (reg = 0; reg < IDEDMA_NREGS; reg++) { 4239 size = 4; 4240 if (size > (IDEDMA_SCH_OFFSET - reg)) 4241 size = IDEDMA_SCH_OFFSET - reg; 4242 if (bus_space_subregion(sl->ba5_st, 4243 sl->ba5_sh, 4244 satalink_ba5_regmap[chan].ba5_IDEDMA_CMD + reg, 4245 size, &sl->regs[chan].dma_iohs[reg]) != 0) { 4246 sc->sc_dma_ok = 0; 4247 printf(": can't subregion offset " 4248 "%lu size %lu", 4249 (u_long) satalink_ba5_regmap[ 4250 chan].ba5_IDEDMA_CMD + reg, 4251 (u_long) size); 4252 return; 4253 } 4254 } 4255 } 4256 4257 sc->sc_dmacmd_read = sii3114_dmacmd_read; 4258 sc->sc_dmacmd_write = sii3114_dmacmd_write; 4259 sc->sc_dmactl_read = sii3114_dmactl_read; 4260 sc->sc_dmactl_write = sii3114_dmactl_write; 4261 sc->sc_dmatbl_write = sii3114_dmatbl_write; 4262 4263 /* DMA registers all set up! */ 4264 sc->sc_dmat = pa->pa_dmat; 4265 sc->sc_dma_ok = 1; 4266 } 4267 4268 int 4269 sii3114_chansetup(struct pciide_softc *sc, int channel) 4270 { 4271 static const char *channel_names[] = { 4272 "port 0", 4273 "port 1", 4274 "port 2", 4275 "port 3", 4276 }; 4277 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4278 4279 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4280 4281 /* 4282 * We must always keep the Interrupt Steering bit set in channel 2's 4283 * IDEDMA_CMD register. 
4284 */ 4285 if (channel == 2) 4286 cp->idedma_cmd = IDEDMA_CMD_INT_STEER; 4287 4288 cp->name = channel_names[channel]; 4289 cp->wdc_channel.channel = channel; 4290 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4291 cp->wdc_channel.ch_queue = 4292 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 4293 if (cp->wdc_channel.ch_queue == NULL) { 4294 printf("%s %s channel: " 4295 "can't allocate memory for command queue", 4296 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4297 return (0); 4298 } 4299 return (1); 4300 } 4301 4302 void 4303 sii3114_mapchan(struct pciide_channel *cp) 4304 { 4305 struct channel_softc *wdc_cp = &cp->wdc_channel; 4306 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4307 struct pciide_satalink *sl = sc->sc_cookie; 4308 int chan = wdc_cp->channel; 4309 int i; 4310 4311 cp->hw_ok = 0; 4312 cp->compat = 0; 4313 cp->ih = sc->sc_pci_ih; 4314 4315 sl->regs[chan].cmd_iot = sl->ba5_st; 4316 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4317 satalink_ba5_regmap[chan].ba5_IDE_TF0, 4318 9, &sl->regs[chan].cmd_baseioh) != 0) { 4319 printf("%s: couldn't subregion %s cmd base\n", 4320 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4321 return; 4322 } 4323 4324 sl->regs[chan].ctl_iot = sl->ba5_st; 4325 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4326 satalink_ba5_regmap[chan].ba5_IDE_TF8, 4327 1, &cp->ctl_baseioh) != 0) { 4328 printf("%s: couldn't subregion %s ctl base\n", 4329 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4330 return; 4331 } 4332 sl->regs[chan].ctl_ioh = cp->ctl_baseioh; 4333 4334 for (i = 0; i < WDC_NREG; i++) { 4335 if (bus_space_subregion(sl->regs[chan].cmd_iot, 4336 sl->regs[chan].cmd_baseioh, 4337 i, i == 0 ? 4 : 1, 4338 &sl->regs[chan].cmd_iohs[i]) != 0) { 4339 printf("%s: couldn't subregion %s channel " 4340 "cmd regs\n", 4341 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4342 return; 4343 } 4344 } 4345 sl->regs[chan].cmd_iohs[wdr_status & _WDC_REGMASK] = 4346 sl->regs[chan].cmd_iohs[wdr_command & _WDC_REGMASK]; 4347 sl->regs[chan].cmd_iohs[wdr_features & _WDC_REGMASK] = 4348 sl->regs[chan].cmd_iohs[wdr_error & _WDC_REGMASK]; 4349 wdc_cp->data32iot = wdc_cp->cmd_iot = sl->regs[chan].cmd_iot; 4350 wdc_cp->data32ioh = wdc_cp->cmd_ioh = sl->regs[chan].cmd_iohs[0]; 4351 wdc_cp->_vtbl = &wdc_sii3114_vtbl; 4352 wdcattach(wdc_cp); 4353 cp->hw_ok = 1; 4354 } 4355 4356 u_int8_t 4357 sii3114_read_reg(struct channel_softc *chp, enum wdc_regs reg) 4358 { 4359 struct pciide_channel *cp = (struct pciide_channel *)chp; 4360 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4361 struct pciide_satalink *sl = sc->sc_cookie; 4362 4363 if (reg & _WDC_AUX) 4364 return (bus_space_read_1(sl->regs[chp->channel].ctl_iot, 4365 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 4366 else 4367 return (bus_space_read_1(sl->regs[chp->channel].cmd_iot, 4368 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0)); 4369 } 4370 4371 void 4372 sii3114_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 4373 { 4374 struct pciide_channel *cp = (struct pciide_channel *)chp; 4375 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4376 struct pciide_satalink *sl = sc->sc_cookie; 4377 4378 if (reg & _WDC_AUX) 4379 bus_space_write_1(sl->regs[chp->channel].ctl_iot, 4380 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 4381 else 4382 bus_space_write_1(sl->regs[chp->channel].cmd_iot, 4383 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 4384 0, val); 4385 } 4386 4387 u_int8_t 4388 sii3114_dmacmd_read(struct 
pciide_softc *sc, int chan) 4389 { 4390 struct pciide_satalink *sl = sc->sc_cookie; 4391 4392 return (bus_space_read_1(sc->sc_dma_iot, 4393 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0)); 4394 } 4395 4396 void 4397 sii3114_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 4398 { 4399 struct pciide_satalink *sl = sc->sc_cookie; 4400 4401 bus_space_write_1(sc->sc_dma_iot, 4402 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, val); 4403 } 4404 4405 u_int8_t 4406 sii3114_dmactl_read(struct pciide_softc *sc, int chan) 4407 { 4408 struct pciide_satalink *sl = sc->sc_cookie; 4409 4410 return (bus_space_read_1(sc->sc_dma_iot, 4411 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0)); 4412 } 4413 4414 void 4415 sii3114_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 4416 { 4417 struct pciide_satalink *sl = sc->sc_cookie; 4418 4419 bus_space_write_1(sc->sc_dma_iot, 4420 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0, val); 4421 } 4422 4423 void 4424 sii3114_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 4425 { 4426 struct pciide_satalink *sl = sc->sc_cookie; 4427 4428 bus_space_write_4(sc->sc_dma_iot, 4429 sl->regs[chan].dma_iohs[IDEDMA_TBL(0)], 0, val); 4430 } 4431 4432 void 4433 cy693_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4434 { 4435 struct pciide_channel *cp; 4436 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4437 bus_size_t cmdsize, ctlsize; 4438 struct pciide_cy *cy; 4439 4440 /* Allocate memory for private data */ 4441 sc->sc_cookie = malloc(sizeof(*cy), M_DEVBUF, M_NOWAIT | M_ZERO); 4442 cy = sc->sc_cookie; 4443 4444 /* 4445 * this chip has 2 PCI IDE functions, one for primary and one for 4446 * secondary. So we need to call pciide_mapregs_compat() with 4447 * the real channel 4448 */ 4449 if (pa->pa_function == 1) { 4450 cy->cy_compatchan = 0; 4451 } else if (pa->pa_function == 2) { 4452 cy->cy_compatchan = 1; 4453 } else { 4454 printf(": unexpected PCI function %d\n", pa->pa_function); 4455 return; 4456 } 4457 4458 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 4459 printf(": DMA"); 4460 pciide_mapreg_dma(sc, pa); 4461 } else { 4462 printf(": no DMA"); 4463 sc->sc_dma_ok = 0; 4464 } 4465 4466 cy->cy_handle = cy82c693_init(pa->pa_iot); 4467 if (cy->cy_handle == NULL) { 4468 printf(", (unable to map ctl registers)"); 4469 sc->sc_dma_ok = 0; 4470 } 4471 4472 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4473 WDC_CAPABILITY_MODE; 4474 if (sc->sc_dma_ok) { 4475 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4476 sc->sc_wdcdev.irqack = pciide_irqack; 4477 } 4478 sc->sc_wdcdev.PIO_cap = 4; 4479 sc->sc_wdcdev.DMA_cap = 2; 4480 sc->sc_wdcdev.set_modes = cy693_setup_channel; 4481 4482 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4483 sc->sc_wdcdev.nchannels = 1; 4484 4485 /* Only one channel for this chip; if we are here it's enabled */ 4486 cp = &sc->pciide_channels[0]; 4487 sc->wdc_chanarray[0] = &cp->wdc_channel; 4488 cp->name = PCIIDE_CHANNEL_NAME(0); 4489 cp->wdc_channel.channel = 0; 4490 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4491 cp->wdc_channel.ch_queue = 4492 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 4493 if (cp->wdc_channel.ch_queue == NULL) { 4494 printf(": cannot allocate memory for command queue\n"); 4495 return; 4496 } 4497 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 4498 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 
4499 "configured" : "wired"); 4500 if (interface & PCIIDE_INTERFACE_PCI(0)) { 4501 printf("native-PCI\n"); 4502 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 4503 pciide_pci_intr); 4504 } else { 4505 printf("compatibility\n"); 4506 cp->hw_ok = pciide_mapregs_compat(pa, cp, cy->cy_compatchan, 4507 &cmdsize, &ctlsize); 4508 } 4509 4510 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4511 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4512 pciide_map_compat_intr(pa, cp, cy->cy_compatchan, interface); 4513 if (cp->hw_ok == 0) 4514 return; 4515 wdcattach(&cp->wdc_channel); 4516 if (pciide_chan_candisable(cp)) { 4517 pci_conf_write(sc->sc_pc, sc->sc_tag, 4518 PCI_COMMAND_STATUS_REG, 0); 4519 } 4520 if (cp->hw_ok == 0) { 4521 pciide_unmap_compat_intr(pa, cp, cy->cy_compatchan, 4522 interface); 4523 return; 4524 } 4525 4526 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 4527 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4528 cy693_setup_channel(&cp->wdc_channel); 4529 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 4530 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4531 } 4532 4533 void 4534 cy693_setup_channel(struct channel_softc *chp) 4535 { 4536 struct ata_drive_datas *drvp; 4537 int drive; 4538 u_int32_t cy_cmd_ctrl; 4539 u_int32_t idedma_ctl; 4540 struct pciide_channel *cp = (struct pciide_channel *)chp; 4541 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4542 int dma_mode = -1; 4543 struct pciide_cy *cy = sc->sc_cookie; 4544 4545 cy_cmd_ctrl = idedma_ctl = 0; 4546 4547 /* setup DMA if needed */ 4548 pciide_channel_dma_setup(cp); 4549 4550 for (drive = 0; drive < 2; drive++) { 4551 drvp = &chp->ch_drive[drive]; 4552 /* If no drive, skip */ 4553 if ((drvp->drive_flags & DRIVE) == 0) 4554 continue; 4555 /* add timing values, setup DMA if needed */ 4556 if (drvp->drive_flags & DRIVE_DMA) { 4557 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4558 /* use Multiword DMA */ 4559 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 4560 dma_mode = drvp->DMA_mode; 4561 } 4562 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4563 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 4564 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4565 CY_CMD_CTRL_IOW_REC_OFF(drive)); 4566 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4567 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 4568 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4569 CY_CMD_CTRL_IOR_REC_OFF(drive)); 4570 } 4571 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 4572 chp->ch_drive[0].DMA_mode = dma_mode; 4573 chp->ch_drive[1].DMA_mode = dma_mode; 4574 4575 if (dma_mode == -1) 4576 dma_mode = 0; 4577 4578 if (cy->cy_handle != NULL) { 4579 /* Note: `multiple' is implied. */ 4580 cy82c693_write(cy->cy_handle, 4581 (cy->cy_compatchan == 0) ? 
4582 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 4583 } 4584 4585 pciide_print_modes(cp); 4586 4587 if (idedma_ctl != 0) { 4588 /* Add software bits in status register */ 4589 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4590 IDEDMA_CTL(chp->channel), idedma_ctl); 4591 } 4592 } 4593 4594 static struct sis_hostbr_type { 4595 u_int16_t id; 4596 u_int8_t rev; 4597 u_int8_t udma_mode; 4598 char *name; 4599 u_int8_t type; 4600 #define SIS_TYPE_NOUDMA 0 4601 #define SIS_TYPE_66 1 4602 #define SIS_TYPE_100OLD 2 4603 #define SIS_TYPE_100NEW 3 4604 #define SIS_TYPE_133OLD 4 4605 #define SIS_TYPE_133NEW 5 4606 #define SIS_TYPE_SOUTH 6 4607 } sis_hostbr_type[] = { 4608 /* Most infos here are from sos@freebsd.org */ 4609 {PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66}, 4610 #if 0 4611 /* 4612 * controllers associated to a rev 0x2 530 Host to PCI Bridge 4613 * have problems with UDMA (info provided by Christos) 4614 */ 4615 {PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA}, 4616 #endif 4617 {PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66}, 4618 {PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66}, 4619 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66}, 4620 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66}, 4621 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW}, 4622 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW}, 4623 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW}, 4624 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH}, 4625 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH}, 4626 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH}, 4627 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH}, 4628 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH}, 4629 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH}, 4630 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH}, 4631 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH}, 4632 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH}, 4633 {PCI_PRODUCT_SIS_661, 0x00, 6, "661", SIS_TYPE_SOUTH}, 4634 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD}, 4635 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW}, 4636 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW}, 4637 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH}, 4638 {PCI_PRODUCT_SIS_741, 0x00, 6, "741", SIS_TYPE_SOUTH}, 4639 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW}, 4640 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH}, 4641 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH}, 4642 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH}, 4643 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH}, 4644 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH}, 4645 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH}, 4646 {PCI_PRODUCT_SIS_760, 0x00, 6, "760", SIS_TYPE_SOUTH}, 4647 /* 4648 * From sos@freebsd.org: the 0x961 ID will never be found in real world 4649 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW}, 4650 */ 4651 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW}, 4652 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW}, 4653 {PCI_PRODUCT_SIS_964, 0x00, 6, "964", SIS_TYPE_133NEW}, 4654 {PCI_PRODUCT_SIS_965, 0x00, 6, "965", SIS_TYPE_133NEW} 4655 }; 4656 4657 static struct sis_hostbr_type *sis_hostbr_type_match; 4658 4659 int 4660 sis_hostbr_match(struct pci_attach_args *pa) 4661 { 4662 int i; 4663 4664 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS) 4665 return (0); 4666 sis_hostbr_type_match = NULL; 4667 for (i = 0; 4668 i < sizeof(sis_hostbr_type) / 
sizeof(sis_hostbr_type[0]); 4669 i++) { 4670 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id && 4671 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev) 4672 sis_hostbr_type_match = &sis_hostbr_type[i]; 4673 } 4674 return (sis_hostbr_type_match != NULL); 4675 } 4676 4677 int 4678 sis_south_match(struct pci_attach_args *pa) 4679 { 4680 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS && 4681 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 && 4682 PCI_REVISION(pa->pa_class) >= 0x10); 4683 } 4684 4685 void 4686 sis_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4687 { 4688 struct pciide_channel *cp; 4689 int channel; 4690 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 4691 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4692 int rev = sc->sc_rev; 4693 bus_size_t cmdsize, ctlsize; 4694 struct pciide_sis *sis; 4695 4696 /* Allocate memory for private data */ 4697 sc->sc_cookie = malloc(sizeof(*sis), M_DEVBUF, M_NOWAIT | M_ZERO); 4698 sis = sc->sc_cookie; 4699 4700 pci_find_device(NULL, sis_hostbr_match); 4701 4702 if (sis_hostbr_type_match) { 4703 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) { 4704 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57, 4705 pciide_pci_read(sc->sc_pc, sc->sc_tag, 4706 SIS_REG_57) & 0x7f); 4707 if (sc->sc_pp->ide_product == SIS_PRODUCT_5518) { 4708 sis->sis_type = SIS_TYPE_133NEW; 4709 sc->sc_wdcdev.UDMA_cap = 4710 sis_hostbr_type_match->udma_mode; 4711 } else { 4712 if (pci_find_device(NULL, sis_south_match)) { 4713 sis->sis_type = SIS_TYPE_133OLD; 4714 sc->sc_wdcdev.UDMA_cap = 4715 sis_hostbr_type_match->udma_mode; 4716 } else { 4717 sis->sis_type = SIS_TYPE_100NEW; 4718 sc->sc_wdcdev.UDMA_cap = 4719 sis_hostbr_type_match->udma_mode; 4720 } 4721 } 4722 } else { 4723 sis->sis_type = sis_hostbr_type_match->type; 4724 sc->sc_wdcdev.UDMA_cap = 4725 sis_hostbr_type_match->udma_mode; 4726 } 4727 printf(": %s", sis_hostbr_type_match->name); 4728 } else { 4729 printf(": 5597/5598"); 4730 if (rev >= 0xd0) { 4731 sc->sc_wdcdev.UDMA_cap = 2; 4732 sis->sis_type = SIS_TYPE_66; 4733 } else { 4734 sc->sc_wdcdev.UDMA_cap = 0; 4735 sis->sis_type = SIS_TYPE_NOUDMA; 4736 } 4737 } 4738 4739 printf(": DMA"); 4740 pciide_mapreg_dma(sc, pa); 4741 4742 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4743 WDC_CAPABILITY_MODE; 4744 if (sc->sc_dma_ok) { 4745 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4746 sc->sc_wdcdev.irqack = pciide_irqack; 4747 if (sis->sis_type >= SIS_TYPE_66) 4748 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 4749 } 4750 4751 sc->sc_wdcdev.PIO_cap = 4; 4752 sc->sc_wdcdev.DMA_cap = 2; 4753 4754 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4755 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4756 switch (sis->sis_type) { 4757 case SIS_TYPE_NOUDMA: 4758 case SIS_TYPE_66: 4759 case SIS_TYPE_100OLD: 4760 sc->sc_wdcdev.set_modes = sis_setup_channel; 4761 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 4762 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 4763 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC); 4764 break; 4765 case SIS_TYPE_100NEW: 4766 case SIS_TYPE_133OLD: 4767 sc->sc_wdcdev.set_modes = sis_setup_channel; 4768 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49, 4769 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01); 4770 break; 4771 case SIS_TYPE_133NEW: 4772 sc->sc_wdcdev.set_modes = sis96x_setup_channel; 4773 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 4774 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 4775 
pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 4776 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 4777 break; 4778 } 4779 4780 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 4781 4782 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4783 cp = &sc->pciide_channels[channel]; 4784 if (pciide_chansetup(sc, channel, interface) == 0) 4785 continue; 4786 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 4787 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 4788 printf("%s: %s ignored (disabled)\n", 4789 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4790 continue; 4791 } 4792 pciide_map_compat_intr(pa, cp, channel, interface); 4793 if (cp->hw_ok == 0) 4794 continue; 4795 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4796 pciide_pci_intr); 4797 if (cp->hw_ok == 0) { 4798 pciide_unmap_compat_intr(pa, cp, channel, interface); 4799 continue; 4800 } 4801 if (pciide_chan_candisable(cp)) { 4802 if (channel == 0) 4803 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 4804 else 4805 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 4806 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 4807 sis_ctr0); 4808 } 4809 if (cp->hw_ok == 0) { 4810 pciide_unmap_compat_intr(pa, cp, channel, interface); 4811 continue; 4812 } 4813 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4814 } 4815 } 4816 4817 void 4818 sis96x_setup_channel(struct channel_softc *chp) 4819 { 4820 struct ata_drive_datas *drvp; 4821 int drive; 4822 u_int32_t sis_tim; 4823 u_int32_t idedma_ctl; 4824 int regtim; 4825 struct pciide_channel *cp = (struct pciide_channel *)chp; 4826 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4827 4828 sis_tim = 0; 4829 idedma_ctl = 0; 4830 /* setup DMA if needed */ 4831 pciide_channel_dma_setup(cp); 4832 4833 for (drive = 0; drive < 2; drive++) { 4834 regtim = SIS_TIM133( 4835 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 4836 chp->channel, drive); 4837 drvp = &chp->ch_drive[drive]; 4838 /* If no drive, skip */ 4839 if ((drvp->drive_flags & DRIVE) == 0) 4840 continue; 4841 /* add timing values, setup DMA if needed */ 4842 if (drvp->drive_flags & DRIVE_UDMA) { 4843 /* use Ultra/DMA */ 4844 drvp->drive_flags &= ~DRIVE_DMA; 4845 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 4846 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 4847 if (drvp->UDMA_mode > 2) 4848 drvp->UDMA_mode = 2; 4849 } 4850 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 4851 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 4852 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4853 } else if (drvp->drive_flags & DRIVE_DMA) { 4854 /* 4855 * use Multiword DMA 4856 * Timings will be used for both PIO and DMA, 4857 * so adjust DMA mode if needed 4858 */ 4859 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 4860 drvp->PIO_mode = drvp->DMA_mode + 2; 4861 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 4862 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
4863 drvp->PIO_mode - 2 : 0; 4864 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 4865 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4866 } else { 4867 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 4868 } 4869 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 4870 "channel %d drive %d: 0x%x (reg 0x%x)\n", 4871 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 4872 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 4873 } 4874 if (idedma_ctl != 0) { 4875 /* Add software bits in status register */ 4876 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4877 IDEDMA_CTL(chp->channel), idedma_ctl); 4878 } 4879 pciide_print_modes(cp); 4880 } 4881 4882 void 4883 sis_setup_channel(struct channel_softc *chp) 4884 { 4885 struct ata_drive_datas *drvp; 4886 int drive; 4887 u_int32_t sis_tim; 4888 u_int32_t idedma_ctl; 4889 struct pciide_channel *cp = (struct pciide_channel *)chp; 4890 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4891 struct pciide_sis *sis = sc->sc_cookie; 4892 4893 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 4894 "channel %d 0x%x\n", chp->channel, 4895 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 4896 DEBUG_PROBE); 4897 sis_tim = 0; 4898 idedma_ctl = 0; 4899 /* setup DMA if needed */ 4900 pciide_channel_dma_setup(cp); 4901 4902 for (drive = 0; drive < 2; drive++) { 4903 drvp = &chp->ch_drive[drive]; 4904 /* If no drive, skip */ 4905 if ((drvp->drive_flags & DRIVE) == 0) 4906 continue; 4907 /* add timing values, setup DMA if needed */ 4908 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 4909 (drvp->drive_flags & DRIVE_UDMA) == 0) 4910 goto pio; 4911 4912 if (drvp->drive_flags & DRIVE_UDMA) { 4913 /* use Ultra/DMA */ 4914 drvp->drive_flags &= ~DRIVE_DMA; 4915 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 4916 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 4917 if (drvp->UDMA_mode > 2) 4918 drvp->UDMA_mode = 2; 4919 } 4920 switch (sis->sis_type) { 4921 case SIS_TYPE_66: 4922 case SIS_TYPE_100OLD: 4923 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 4924 SIS_TIM66_UDMA_TIME_OFF(drive); 4925 break; 4926 case SIS_TYPE_100NEW: 4927 sis_tim |= 4928 sis_udma100new_tim[drvp->UDMA_mode] << 4929 SIS_TIM100_UDMA_TIME_OFF(drive); 4930 break; 4931 case SIS_TYPE_133OLD: 4932 sis_tim |= 4933 sis_udma133old_tim[drvp->UDMA_mode] << 4934 SIS_TIM100_UDMA_TIME_OFF(drive); 4935 break; 4936 default: 4937 printf("unknown SiS IDE type %d\n", 4938 sis->sis_type); 4939 } 4940 } else { 4941 /* 4942 * use Multiword DMA 4943 * Timings will be used for both PIO and DMA, 4944 * so adjust DMA mode if needed 4945 */ 4946 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 4947 drvp->PIO_mode = drvp->DMA_mode + 2; 4948 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 4949 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
4950 drvp->PIO_mode - 2 : 0; 4951 if (drvp->DMA_mode == 0) 4952 drvp->PIO_mode = 0; 4953 } 4954 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4955 pio: switch (sis->sis_type) { 4956 case SIS_TYPE_NOUDMA: 4957 case SIS_TYPE_66: 4958 case SIS_TYPE_100OLD: 4959 sis_tim |= sis_pio_act[drvp->PIO_mode] << 4960 SIS_TIM66_ACT_OFF(drive); 4961 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 4962 SIS_TIM66_REC_OFF(drive); 4963 break; 4964 case SIS_TYPE_100NEW: 4965 case SIS_TYPE_133OLD: 4966 sis_tim |= sis_pio_act[drvp->PIO_mode] << 4967 SIS_TIM100_ACT_OFF(drive); 4968 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 4969 SIS_TIM100_REC_OFF(drive); 4970 break; 4971 default: 4972 printf("unknown SiS IDE type %d\n", 4973 sis->sis_type); 4974 } 4975 } 4976 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 4977 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 4978 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 4979 if (idedma_ctl != 0) { 4980 /* Add software bits in status register */ 4981 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4982 IDEDMA_CTL(chp->channel), idedma_ctl); 4983 } 4984 pciide_print_modes(cp); 4985 } 4986 4987 void 4988 natsemi_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4989 { 4990 struct pciide_channel *cp; 4991 int channel; 4992 pcireg_t interface, ctl; 4993 bus_size_t cmdsize, ctlsize; 4994 4995 printf(": DMA"); 4996 pciide_mapreg_dma(sc, pa); 4997 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 4998 4999 if (sc->sc_dma_ok) { 5000 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5001 sc->sc_wdcdev.irqack = natsemi_irqack; 5002 } 5003 5004 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 5005 5006 /* 5007 * Mask off interrupts from both channels, appropriate channel(s) 5008 * will be unmasked later. 5009 */ 5010 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5011 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 5012 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 5013 5014 sc->sc_wdcdev.PIO_cap = 4; 5015 sc->sc_wdcdev.DMA_cap = 2; 5016 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 5017 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5018 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5019 5020 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5021 PCI_CLASS_REG)); 5022 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 5023 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5024 5025 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
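 * INTA only matters when at least one channel runs in native-PCI mode; channels left in compatibility mode interrupt through the legacy ISA IRQs instead.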
*/ 5026 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 5027 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 5028 ctl &= ~NATSEMI_CTRL1_INTAMASK; 5029 else 5030 ctl |= NATSEMI_CTRL1_INTAMASK; 5031 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 5032 5033 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5034 cp = &sc->pciide_channels[channel]; 5035 if (pciide_chansetup(sc, channel, interface) == 0) 5036 continue; 5037 5038 pciide_map_compat_intr(pa, cp, channel, interface); 5039 if (cp->hw_ok == 0) 5040 continue; 5041 5042 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5043 natsemi_pci_intr); 5044 if (cp->hw_ok == 0) { 5045 pciide_unmap_compat_intr(pa, cp, channel, interface); 5046 continue; 5047 } 5048 natsemi_setup_channel(&cp->wdc_channel); 5049 } 5050 } 5051 5052 void 5053 natsemi_setup_channel(struct channel_softc *chp) 5054 { 5055 struct ata_drive_datas *drvp; 5056 int drive, ndrives = 0; 5057 u_int32_t idedma_ctl = 0; 5058 struct pciide_channel *cp = (struct pciide_channel *)chp; 5059 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5060 u_int8_t tim; 5061 5062 /* setup DMA if needed */ 5063 pciide_channel_dma_setup(cp); 5064 5065 for (drive = 0; drive < 2; drive++) { 5066 drvp = &chp->ch_drive[drive]; 5067 /* If no drive, skip */ 5068 if ((drvp->drive_flags & DRIVE) == 0) 5069 continue; 5070 5071 ndrives++; 5072 /* add timing values, setup DMA if needed */ 5073 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 5074 tim = natsemi_pio_pulse[drvp->PIO_mode] | 5075 (natsemi_pio_recover[drvp->PIO_mode] << 4); 5076 } else { 5077 /* 5078 * use Multiword DMA 5079 * Timings will be used for both PIO and DMA, 5080 * so adjust DMA mode if needed 5081 */ 5082 if (drvp->PIO_mode >= 3 && 5083 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5084 drvp->DMA_mode = drvp->PIO_mode - 2; 5085 } 5086 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5087 tim = natsemi_dma_pulse[drvp->DMA_mode] | 5088 (natsemi_dma_recover[drvp->DMA_mode] << 4); 5089 } 5090 5091 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5092 NATSEMI_RTREG(chp->channel, drive), tim); 5093 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5094 NATSEMI_WTREG(chp->channel, drive), tim); 5095 } 5096 if (idedma_ctl != 0) { 5097 /* Add software bits in status register */ 5098 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5099 IDEDMA_CTL(chp->channel), idedma_ctl); 5100 } 5101 if (ndrives > 0) { 5102 /* Unmask the channel if at least one drive is found */ 5103 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5104 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 5105 ~(NATSEMI_CHMASK(chp->channel))); 5106 } 5107 5108 pciide_print_modes(cp); 5109 5110 /* Go ahead and ack interrupts generated during probe. 
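 * Reading IDEDMA_CTL and writing the value straight back clears the write-1-to-clear interrupt and error status bits.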
*/ 5111 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5112 IDEDMA_CTL(chp->channel), 5113 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5114 IDEDMA_CTL(chp->channel))); 5115 } 5116 5117 void 5118 natsemi_irqack(struct channel_softc *chp) 5119 { 5120 struct pciide_channel *cp = (struct pciide_channel *)chp; 5121 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5122 u_int8_t clr; 5123 5124 /* The "clear" bits are in the wrong register *sigh* */ 5125 clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5126 IDEDMA_CMD(chp->channel)); 5127 clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5128 IDEDMA_CTL(chp->channel)) & 5129 (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR); 5130 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5131 IDEDMA_CMD(chp->channel), clr); 5132 } 5133 5134 int 5135 natsemi_pci_intr(void *arg) 5136 { 5137 struct pciide_softc *sc = arg; 5138 struct pciide_channel *cp; 5139 struct channel_softc *wdc_cp; 5140 int i, rv, crv; 5141 u_int8_t msk; 5142 5143 rv = 0; 5144 msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2); 5145 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5146 cp = &sc->pciide_channels[i]; 5147 wdc_cp = &cp->wdc_channel; 5148 5149 /* If a compat channel skip. */ 5150 if (cp->compat) 5151 continue; 5152 5153 /* If this channel is masked, skip it. */ 5154 if (msk & NATSEMI_CHMASK(i)) 5155 continue; 5156 5157 if (pciide_intr_flag(cp) == 0) 5158 continue; 5159 5160 crv = wdcintr(wdc_cp); 5161 if (crv == 0) 5162 ; /* leave rv alone */ 5163 else if (crv == 1) 5164 rv = 1; /* claim the intr */ 5165 else if (rv == 0) /* crv should be -1 in this case */ 5166 rv = crv; /* if we've done no better, take it */ 5167 } 5168 return (rv); 5169 } 5170 5171 void 5172 ns_scx200_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5173 { 5174 struct pciide_channel *cp; 5175 int channel; 5176 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 5177 bus_size_t cmdsize, ctlsize; 5178 5179 printf(": DMA"); 5180 pciide_mapreg_dma(sc, pa); 5181 5182 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5183 WDC_CAPABILITY_MODE; 5184 if (sc->sc_dma_ok) { 5185 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5186 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5187 sc->sc_wdcdev.irqack = pciide_irqack; 5188 } 5189 sc->sc_wdcdev.PIO_cap = 4; 5190 sc->sc_wdcdev.DMA_cap = 2; 5191 sc->sc_wdcdev.UDMA_cap = 2; 5192 5193 sc->sc_wdcdev.set_modes = ns_scx200_setup_channel; 5194 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5195 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5196 5197 /* 5198 * Soekris net4801 errata 0003: 5199 * 5200 * The SC1100 built in busmaster IDE controller is pretty standard, 5201 * but have two bugs: data transfers need to be dword aligned and 5202 * it cannot do an exact 64Kbyte data transfer. 5203 * 5204 * Assume that reducing maximum segment size by one page 5205 * will be enough, and restrict boundary too for extra certainty. 5206 */ 5207 if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SCx200_IDE) { 5208 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE; 5209 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE; 5210 } 5211 5212 /* 5213 * This chip seems to be unable to do one-sector transfers 5214 * using DMA. 
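 * Flag it with WDC_QUIRK_NOSHORTDMA below so the wdc layer can avoid DMA for those transfers.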
5215 */ 5216 sc->sc_wdcdev.quirks = WDC_QUIRK_NOSHORTDMA; 5217 5218 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5219 5220 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5221 cp = &sc->pciide_channels[channel]; 5222 if (pciide_chansetup(sc, channel, interface) == 0) 5223 continue; 5224 pciide_map_compat_intr(pa, cp, channel, interface); 5225 if (cp->hw_ok == 0) 5226 continue; 5227 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5228 pciide_pci_intr); 5229 if (cp->hw_ok == 0) { 5230 pciide_unmap_compat_intr(pa, cp, channel, interface); 5231 continue; 5232 } 5233 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5234 } 5235 } 5236 5237 void 5238 ns_scx200_setup_channel(struct channel_softc *chp) 5239 { 5240 struct ata_drive_datas *drvp; 5241 int drive, mode; 5242 u_int32_t idedma_ctl; 5243 struct pciide_channel *cp = (struct pciide_channel*)chp; 5244 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5245 int channel = chp->channel; 5246 int pioformat; 5247 pcireg_t piotim, dmatim; 5248 5249 /* Setup DMA if needed */ 5250 pciide_channel_dma_setup(cp); 5251 5252 idedma_ctl = 0; 5253 5254 pioformat = (pci_conf_read(sc->sc_pc, sc->sc_tag, 5255 SCx200_TIM_DMA(0, 0)) >> SCx200_PIOFORMAT_SHIFT) & 0x01; 5256 WDCDEBUG_PRINT(("%s: pio format %d\n", __func__, pioformat), 5257 DEBUG_PROBE); 5258 5259 /* Per channel settings */ 5260 for (drive = 0; drive < 2; drive++) { 5261 drvp = &chp->ch_drive[drive]; 5262 5263 /* If no drive, skip */ 5264 if ((drvp->drive_flags & DRIVE) == 0) 5265 continue; 5266 5267 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5268 SCx200_TIM_PIO(channel, drive)); 5269 dmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5270 SCx200_TIM_DMA(channel, drive)); 5271 WDCDEBUG_PRINT(("%s:%d:%d: piotim=0x%x, dmatim=0x%x\n", 5272 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5273 piotim, dmatim), DEBUG_PROBE); 5274 5275 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 5276 (drvp->drive_flags & DRIVE_UDMA) != 0) { 5277 /* Setup UltraDMA mode */ 5278 drvp->drive_flags &= ~DRIVE_DMA; 5279 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5280 dmatim = scx200_udma33[drvp->UDMA_mode]; 5281 mode = drvp->PIO_mode; 5282 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 5283 (drvp->drive_flags & DRIVE_DMA) != 0) { 5284 /* Setup multiword DMA mode */ 5285 drvp->drive_flags &= ~DRIVE_UDMA; 5286 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5287 dmatim = scx200_dma33[drvp->DMA_mode]; 5288 5289 /* mode = min(pio, dma + 2) */ 5290 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 5291 mode = drvp->PIO_mode; 5292 else 5293 mode = drvp->DMA_mode + 2; 5294 } else { 5295 mode = drvp->PIO_mode; 5296 } 5297 5298 /* Setup PIO mode */ 5299 drvp->PIO_mode = mode; 5300 if (mode < 2) 5301 drvp->DMA_mode = 0; 5302 else 5303 drvp->DMA_mode = mode - 2; 5304 5305 piotim = scx200_pio33[pioformat][drvp->PIO_mode]; 5306 5307 WDCDEBUG_PRINT(("%s:%d:%d: new piotim=0x%x, dmatim=0x%x\n", 5308 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5309 piotim, dmatim), DEBUG_PROBE); 5310 5311 pci_conf_write(sc->sc_pc, sc->sc_tag, 5312 SCx200_TIM_PIO(channel, drive), piotim); 5313 pci_conf_write(sc->sc_pc, sc->sc_tag, 5314 SCx200_TIM_DMA(channel, drive), dmatim); 5315 } 5316 5317 if (idedma_ctl != 0) { 5318 /* Add software bits in status register */ 5319 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5320 IDEDMA_CTL(channel), idedma_ctl); 5321 } 5322 5323 pciide_print_modes(cp); 5324 } 5325 5326 void 5327 acer_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5328 { 5329 struct 
pciide_channel *cp; 5330 int channel; 5331 pcireg_t cr, interface; 5332 bus_size_t cmdsize, ctlsize; 5333 int rev = sc->sc_rev; 5334 5335 printf(": DMA"); 5336 pciide_mapreg_dma(sc, pa); 5337 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5338 WDC_CAPABILITY_MODE; 5339 5340 if (sc->sc_dma_ok) { 5341 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 5342 if (rev >= 0x20) { 5343 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5344 if (rev >= 0xC4) 5345 sc->sc_wdcdev.UDMA_cap = 5; 5346 else if (rev >= 0xC2) 5347 sc->sc_wdcdev.UDMA_cap = 4; 5348 else 5349 sc->sc_wdcdev.UDMA_cap = 2; 5350 } 5351 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5352 sc->sc_wdcdev.irqack = pciide_irqack; 5353 } 5354 5355 sc->sc_wdcdev.PIO_cap = 4; 5356 sc->sc_wdcdev.DMA_cap = 2; 5357 sc->sc_wdcdev.set_modes = acer_setup_channel; 5358 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5359 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5360 5361 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 5362 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 5363 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 5364 5365 /* Enable "microsoft register bits" R/W. */ 5366 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 5367 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 5368 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 5369 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 5370 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 5371 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 5372 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 5373 ~ACER_CHANSTATUSREGS_RO); 5374 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 5375 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 5376 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 5377 /* Don't use cr, re-read the real register content instead */ 5378 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5379 PCI_CLASS_REG)); 5380 5381 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5382 5383 /* From linux: enable "Cable Detection" */ 5384 if (rev >= 0xC2) 5385 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 5386 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 5387 | ACER_0x4B_CDETECT); 5388 5389 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5390 cp = &sc->pciide_channels[channel]; 5391 if (pciide_chansetup(sc, channel, interface) == 0) 5392 continue; 5393 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 5394 printf("%s: %s ignored (disabled)\n", 5395 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5396 continue; 5397 } 5398 pciide_map_compat_intr(pa, cp, channel, interface); 5399 if (cp->hw_ok == 0) 5400 continue; 5401 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5402 (rev >= 0xC2) ? 
pciide_pci_intr : acer_pci_intr); 5403 if (cp->hw_ok == 0) { 5404 pciide_unmap_compat_intr(pa, cp, channel, interface); 5405 continue; 5406 } 5407 if (pciide_chan_candisable(cp)) { 5408 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 5409 pci_conf_write(sc->sc_pc, sc->sc_tag, 5410 PCI_CLASS_REG, cr); 5411 } 5412 if (cp->hw_ok == 0) { 5413 pciide_unmap_compat_intr(pa, cp, channel, interface); 5414 continue; 5415 } 5416 acer_setup_channel(&cp->wdc_channel); 5417 } 5418 } 5419 5420 void 5421 acer_setup_channel(struct channel_softc *chp) 5422 { 5423 struct ata_drive_datas *drvp; 5424 int drive; 5425 u_int32_t acer_fifo_udma; 5426 u_int32_t idedma_ctl; 5427 struct pciide_channel *cp = (struct pciide_channel *)chp; 5428 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5429 5430 idedma_ctl = 0; 5431 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 5432 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 5433 acer_fifo_udma), DEBUG_PROBE); 5434 /* setup DMA if needed */ 5435 pciide_channel_dma_setup(cp); 5436 5437 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 5438 DRIVE_UDMA) { /* check 80 pins cable */ 5439 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 5440 ACER_0x4A_80PIN(chp->channel)) { 5441 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 5442 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 5443 DEBUG_PROBE); 5444 if (chp->ch_drive[0].UDMA_mode > 2) 5445 chp->ch_drive[0].UDMA_mode = 2; 5446 if (chp->ch_drive[1].UDMA_mode > 2) 5447 chp->ch_drive[1].UDMA_mode = 2; 5448 } 5449 } 5450 5451 for (drive = 0; drive < 2; drive++) { 5452 drvp = &chp->ch_drive[drive]; 5453 /* If no drive, skip */ 5454 if ((drvp->drive_flags & DRIVE) == 0) 5455 continue; 5456 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 5457 "channel %d drive %d 0x%x\n", chp->channel, drive, 5458 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5459 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 5460 /* clear FIFO/DMA mode */ 5461 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 5462 ACER_UDMA_EN(chp->channel, drive) | 5463 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 5464 5465 /* add timing values, setup DMA if needed */ 5466 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5467 (drvp->drive_flags & DRIVE_UDMA) == 0) { 5468 acer_fifo_udma |= 5469 ACER_FTH_OPL(chp->channel, drive, 0x1); 5470 goto pio; 5471 } 5472 5473 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 5474 if (drvp->drive_flags & DRIVE_UDMA) { 5475 /* use Ultra/DMA */ 5476 drvp->drive_flags &= ~DRIVE_DMA; 5477 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 5478 acer_fifo_udma |= 5479 ACER_UDMA_TIM(chp->channel, drive, 5480 acer_udma[drvp->UDMA_mode]); 5481 /* XXX disable if one drive < UDMA3 ? */ 5482 if (drvp->UDMA_mode >= 3) { 5483 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5484 ACER_0x4B, 5485 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5486 ACER_0x4B) | ACER_0x4B_UDMA66); 5487 } 5488 } else { 5489 /* 5490 * use Multiword DMA 5491 * Timings will be used for both PIO and DMA, 5492 * so adjust DMA mode if needed 5493 */ 5494 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5495 drvp->PIO_mode = drvp->DMA_mode + 2; 5496 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5497 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5498 drvp->PIO_mode - 2 : 0; 5499 if (drvp->DMA_mode == 0) 5500 drvp->PIO_mode = 0; 5501 } 5502 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5503 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 5504 ACER_IDETIM(chp->channel, drive), 5505 acer_pio[drvp->PIO_mode]); 5506 } 5507 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 5508 acer_fifo_udma), DEBUG_PROBE); 5509 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 5510 if (idedma_ctl != 0) { 5511 /* Add software bits in status register */ 5512 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5513 IDEDMA_CTL(chp->channel), idedma_ctl); 5514 } 5515 pciide_print_modes(cp); 5516 } 5517 5518 int 5519 acer_pci_intr(void *arg) 5520 { 5521 struct pciide_softc *sc = arg; 5522 struct pciide_channel *cp; 5523 struct channel_softc *wdc_cp; 5524 int i, rv, crv; 5525 u_int32_t chids; 5526 5527 rv = 0; 5528 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 5529 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5530 cp = &sc->pciide_channels[i]; 5531 wdc_cp = &cp->wdc_channel; 5532 /* If a compat channel skip. */ 5533 if (cp->compat) 5534 continue; 5535 if (chids & ACER_CHIDS_INT(i)) { 5536 crv = wdcintr(wdc_cp); 5537 if (crv == 0) 5538 printf("%s:%d: bogus intr\n", 5539 sc->sc_wdcdev.sc_dev.dv_xname, i); 5540 else 5541 rv = 1; 5542 } 5543 } 5544 return (rv); 5545 } 5546 5547 void 5548 hpt_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5549 { 5550 struct pciide_channel *cp; 5551 int i, compatchan, revision; 5552 pcireg_t interface; 5553 bus_size_t cmdsize, ctlsize; 5554 5555 revision = sc->sc_rev; 5556 5557 /* 5558 * when the chip is in native mode it identifies itself as a 5559 * 'misc mass storage'. Fake interface in this case. 5560 */ 5561 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 5562 interface = PCI_INTERFACE(pa->pa_class); 5563 } else { 5564 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 5565 PCIIDE_INTERFACE_PCI(0); 5566 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5567 (revision == HPT370_REV || revision == HPT370A_REV || 5568 revision == HPT372_REV)) || 5569 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5570 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5571 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5572 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5573 interface |= PCIIDE_INTERFACE_PCI(1); 5574 } 5575 5576 printf(": DMA"); 5577 pciide_mapreg_dma(sc, pa); 5578 printf("\n"); 5579 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5580 WDC_CAPABILITY_MODE; 5581 if (sc->sc_dma_ok) { 5582 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5583 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5584 sc->sc_wdcdev.irqack = pciide_irqack; 5585 } 5586 sc->sc_wdcdev.PIO_cap = 4; 5587 sc->sc_wdcdev.DMA_cap = 2; 5588 5589 sc->sc_wdcdev.set_modes = hpt_setup_channel; 5590 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5591 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5592 revision == HPT366_REV) { 5593 sc->sc_wdcdev.UDMA_cap = 4; 5594 /* 5595 * The 366 has 2 PCI IDE functions, one for primary and one 5596 * for secondary. 
So we need to call pciide_mapregs_compat() 5597 * with the real channel 5598 */ 5599 if (pa->pa_function == 0) { 5600 compatchan = 0; 5601 } else if (pa->pa_function == 1) { 5602 compatchan = 1; 5603 } else { 5604 printf("%s: unexpected PCI function %d\n", 5605 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 5606 return; 5607 } 5608 sc->sc_wdcdev.nchannels = 1; 5609 } else { 5610 sc->sc_wdcdev.nchannels = 2; 5611 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5612 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5613 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5614 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5615 sc->sc_wdcdev.UDMA_cap = 6; 5616 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 5617 if (revision == HPT372_REV) 5618 sc->sc_wdcdev.UDMA_cap = 6; 5619 else 5620 sc->sc_wdcdev.UDMA_cap = 5; 5621 } 5622 } 5623 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5624 cp = &sc->pciide_channels[i]; 5625 if (sc->sc_wdcdev.nchannels > 1) { 5626 compatchan = i; 5627 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 5628 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 5629 printf("%s: %s ignored (disabled)\n", 5630 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5631 continue; 5632 } 5633 } 5634 if (pciide_chansetup(sc, i, interface) == 0) 5635 continue; 5636 if (interface & PCIIDE_INTERFACE_PCI(i)) { 5637 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 5638 &ctlsize, hpt_pci_intr); 5639 } else { 5640 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 5641 &cmdsize, &ctlsize); 5642 } 5643 if (cp->hw_ok == 0) 5644 return; 5645 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 5646 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 5647 wdcattach(&cp->wdc_channel); 5648 hpt_setup_channel(&cp->wdc_channel); 5649 } 5650 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5651 (revision == HPT370_REV || revision == HPT370A_REV || 5652 revision == HPT372_REV)) || 5653 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5654 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5655 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5656 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 5657 /* 5658 * Turn off fast interrupts 5659 */ 5660 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0), 5661 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) & 5662 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 5663 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1), 5664 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) & 5665 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 5666 5667 /* 5668 * The HPT370 and higher have a bit to disable interrupts, 5669 * make sure to clear it 5670 */ 5671 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 5672 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 5673 ~HPT_CSEL_IRQDIS); 5674 } 5675 /* set clocks, etc (mandatory on 372/4, optional otherwise) */ 5676 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5677 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5678 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5679 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 || 5680 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5681 revision == HPT372_REV)) 5682 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 5683 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 5684 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 5685 5686 return; 5687 } 5688 5689 void 5690 hpt_setup_channel(struct channel_softc *chp) 5691 { 5692 struct ata_drive_datas *drvp; 5693 int drive,
5694 int cable; 5695 u_int32_t before, after; 5696 u_int32_t idedma_ctl; 5697 struct pciide_channel *cp = (struct pciide_channel *)chp; 5698 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5699 int revision = sc->sc_rev; 5700 u_int32_t *tim_pio, *tim_dma, *tim_udma; 5701 5702 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 5703 5704 /* setup DMA if needed */ 5705 pciide_channel_dma_setup(cp); 5706 5707 idedma_ctl = 0; 5708 5709 switch (sc->sc_pp->ide_product) { 5710 case PCI_PRODUCT_TRIONES_HPT366: 5711 if (revision == HPT370_REV || 5712 revision == HPT370A_REV) { 5713 tim_pio = hpt370_pio; 5714 tim_dma = hpt370_dma; 5715 tim_udma = hpt370_udma; 5716 } else if (revision == HPT372_REV) { 5717 tim_pio = hpt372_pio; 5718 tim_dma = hpt372_dma; 5719 tim_udma = hpt372_udma; 5720 } else { 5721 tim_pio = hpt366_pio; 5722 tim_dma = hpt366_dma; 5723 tim_udma = hpt366_udma; 5724 } 5725 break; 5726 case PCI_PRODUCT_TRIONES_HPT372A: 5727 case PCI_PRODUCT_TRIONES_HPT302: 5728 case PCI_PRODUCT_TRIONES_HPT371: 5729 tim_pio = hpt372_pio; 5730 tim_dma = hpt372_dma; 5731 tim_udma = hpt372_udma; 5732 break; 5733 case PCI_PRODUCT_TRIONES_HPT374: 5734 tim_pio = hpt374_pio; 5735 tim_dma = hpt374_dma; 5736 tim_udma = hpt374_udma; 5737 break; 5738 default: 5739 printf("%s: no known timing values\n", 5740 sc->sc_wdcdev.sc_dev.dv_xname); 5741 goto end; 5742 } 5743 5744 /* Per drive settings */ 5745 for (drive = 0; drive < 2; drive++) { 5746 drvp = &chp->ch_drive[drive]; 5747 /* If no drive, skip */ 5748 if ((drvp->drive_flags & DRIVE) == 0) 5749 continue; 5750 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 5751 HPT_IDETIM(chp->channel, drive)); 5752 5753 /* add timing values, setup DMA if needed */ 5754 if (drvp->drive_flags & DRIVE_UDMA) { 5755 /* use Ultra/DMA */ 5756 drvp->drive_flags &= ~DRIVE_DMA; 5757 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 5758 drvp->UDMA_mode > 2) { 5759 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 5760 "cable not detected\n", drvp->drive_name, 5761 sc->sc_wdcdev.sc_dev.dv_xname, 5762 chp->channel, drive), DEBUG_PROBE); 5763 drvp->UDMA_mode = 2; 5764 } 5765 after = tim_udma[drvp->UDMA_mode]; 5766 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5767 } else if (drvp->drive_flags & DRIVE_DMA) { 5768 /* 5769 * use Multiword DMA. 
5770 * Timings will be used for both PIO and DMA, so adjust 5771 * DMA mode if needed 5772 */ 5773 if (drvp->PIO_mode >= 3 && 5774 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5775 drvp->DMA_mode = drvp->PIO_mode - 2; 5776 } 5777 after = tim_dma[drvp->DMA_mode]; 5778 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5779 } else { 5780 /* PIO only */ 5781 after = tim_pio[drvp->PIO_mode]; 5782 } 5783 pci_conf_write(sc->sc_pc, sc->sc_tag, 5784 HPT_IDETIM(chp->channel, drive), after); 5785 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 5786 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 5787 after, before), DEBUG_PROBE); 5788 } 5789 end: 5790 if (idedma_ctl != 0) { 5791 /* Add software bits in status register */ 5792 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5793 IDEDMA_CTL(chp->channel), idedma_ctl); 5794 } 5795 pciide_print_modes(cp); 5796 } 5797 5798 int 5799 hpt_pci_intr(void *arg) 5800 { 5801 struct pciide_softc *sc = arg; 5802 struct pciide_channel *cp; 5803 struct channel_softc *wdc_cp; 5804 int rv = 0; 5805 int dmastat, i, crv; 5806 5807 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5808 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5809 IDEDMA_CTL(i)); 5810 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 5811 IDEDMA_CTL_INTR) 5812 continue; 5813 cp = &sc->pciide_channels[i]; 5814 wdc_cp = &cp->wdc_channel; 5815 crv = wdcintr(wdc_cp); 5816 if (crv == 0) { 5817 printf("%s:%d: bogus intr\n", 5818 sc->sc_wdcdev.sc_dev.dv_xname, i); 5819 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5820 IDEDMA_CTL(i), dmastat); 5821 } else 5822 rv = 1; 5823 } 5824 return (rv); 5825 } 5826 5827 /* Macros to test product */ 5828 #define PDC_IS_262(sc) \ 5829 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 5830 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 5831 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 5832 #define PDC_IS_265(sc) \ 5833 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 5834 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 5835 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 5836 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 5837 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 5838 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 5839 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 5840 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 5841 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 5842 #define PDC_IS_268(sc) \ 5843 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 5844 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 5845 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 5846 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 5847 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 5848 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 5849 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 5850 #define PDC_IS_269(sc) \ 5851 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 5852 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 5853 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 5854 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 5855 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 5856 5857 u_int8_t 5858 pdc268_config_read(struct channel_softc *chp, int index) 5859 { 5860 struct pciide_channel *cp = (struct pciide_channel *)chp; 5861 struct 
pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5862 int channel = chp->channel; 5863 5864 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5865 PDC268_INDEX(channel), index); 5866 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5867 PDC268_DATA(channel))); 5868 } 5869 5870 void 5871 pdc202xx_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5872 { 5873 struct pciide_channel *cp; 5874 int channel; 5875 pcireg_t interface, st, mode; 5876 bus_size_t cmdsize, ctlsize; 5877 5878 if (!PDC_IS_268(sc)) { 5879 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 5880 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 5881 st), DEBUG_PROBE); 5882 } 5883 5884 /* turn off RAID mode */ 5885 if (!PDC_IS_268(sc)) 5886 st &= ~PDC2xx_STATE_IDERAID; 5887 5888 /* 5889 * can't rely on the PCI_CLASS_REG content if the chip was in raid 5890 * mode. We have to fake interface 5891 */ 5892 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 5893 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 5894 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 5895 5896 printf(": DMA"); 5897 pciide_mapreg_dma(sc, pa); 5898 5899 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5900 WDC_CAPABILITY_MODE; 5901 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 5902 PDC_IS_262(sc)) 5903 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 5904 if (sc->sc_dma_ok) { 5905 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5906 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5907 sc->sc_wdcdev.irqack = pciide_irqack; 5908 } 5909 sc->sc_wdcdev.PIO_cap = 4; 5910 sc->sc_wdcdev.DMA_cap = 2; 5911 if (PDC_IS_269(sc)) 5912 sc->sc_wdcdev.UDMA_cap = 6; 5913 else if (PDC_IS_265(sc)) 5914 sc->sc_wdcdev.UDMA_cap = 5; 5915 else if (PDC_IS_262(sc)) 5916 sc->sc_wdcdev.UDMA_cap = 4; 5917 else 5918 sc->sc_wdcdev.UDMA_cap = 2; 5919 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
5920 pdc20268_setup_channel : pdc202xx_setup_channel; 5921 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5922 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5923 5924 if (PDC_IS_262(sc)) { 5925 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 5926 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 5927 } 5928 5929 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5930 if (!PDC_IS_268(sc)) { 5931 /* setup failsafe defaults */ 5932 mode = 0; 5933 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 5934 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 5935 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 5936 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 5937 for (channel = 0; 5938 channel < sc->sc_wdcdev.nchannels; 5939 channel++) { 5940 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 5941 "drive 0 initial timings 0x%x, now 0x%x\n", 5942 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 5943 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 5944 DEBUG_PROBE); 5945 pci_conf_write(sc->sc_pc, sc->sc_tag, 5946 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 5947 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 5948 "drive 1 initial timings 0x%x, now 0x%x\n", 5949 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 5950 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 5951 pci_conf_write(sc->sc_pc, sc->sc_tag, 5952 PDC2xx_TIM(channel, 1), mode); 5953 } 5954 5955 mode = PDC2xx_SCR_DMA; 5956 if (PDC_IS_262(sc)) { 5957 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 5958 } else { 5959 /* the BIOS set it up this way */ 5960 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 5961 } 5962 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 5963 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 5964 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 5965 "now 0x%x\n", 5966 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 5967 PDC2xx_SCR), 5968 mode), DEBUG_PROBE); 5969 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 5970 PDC2xx_SCR, mode); 5971 5972 /* controller initial state register is OK even without BIOS */ 5973 /* Set DMA mode to IDE DMA compatibility */ 5974 mode = 5975 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 5976 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 5977 DEBUG_PROBE); 5978 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 5979 mode | 0x1); 5980 mode = 5981 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 5982 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 5983 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 5984 mode | 0x1); 5985 } 5986 5987 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5988 cp = &sc->pciide_channels[channel]; 5989 if (pciide_chansetup(sc, channel, interface) == 0) 5990 continue; 5991 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 5992 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 5993 printf("%s: %s ignored (disabled)\n", 5994 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5995 continue; 5996 } 5997 pciide_map_compat_intr(pa, cp, channel, interface); 5998 if (cp->hw_ok == 0) 5999 continue; 6000 if (PDC_IS_265(sc)) 6001 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6002 pdc20265_pci_intr); 6003 else 6004 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6005 pdc202xx_pci_intr); 6006 if (cp->hw_ok == 0) { 6007 pciide_unmap_compat_intr(pa, cp, channel, interface); 6008 continue; 6009 } 6010 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 6011 st &= ~(PDC_IS_262(sc) ? 
6012 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 6013 pciide_unmap_compat_intr(pa, cp, channel, interface); 6014 } 6015 if (PDC_IS_268(sc)) 6016 pdc20268_setup_channel(&cp->wdc_channel); 6017 else 6018 pdc202xx_setup_channel(&cp->wdc_channel); 6019 } 6020 if (!PDC_IS_268(sc)) { 6021 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 6022 "0x%x\n", st), DEBUG_PROBE); 6023 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 6024 } 6025 return; 6026 } 6027 6028 void 6029 pdc202xx_setup_channel(struct channel_softc *chp) 6030 { 6031 struct ata_drive_datas *drvp; 6032 int drive; 6033 pcireg_t mode, st; 6034 u_int32_t idedma_ctl, scr, atapi; 6035 struct pciide_channel *cp = (struct pciide_channel *)chp; 6036 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6037 int channel = chp->channel; 6038 6039 /* setup DMA if needed */ 6040 pciide_channel_dma_setup(cp); 6041 6042 idedma_ctl = 0; 6043 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 6044 sc->sc_wdcdev.sc_dev.dv_xname, 6045 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 6046 DEBUG_PROBE); 6047 6048 /* Per channel settings */ 6049 if (PDC_IS_262(sc)) { 6050 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6051 PDC262_U66); 6052 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6053 /* Check cable */ 6054 if ((st & PDC262_STATE_80P(channel)) != 0 && 6055 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6056 chp->ch_drive[0].UDMA_mode > 2) || 6057 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6058 chp->ch_drive[1].UDMA_mode > 2))) { 6059 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 6060 sc->sc_wdcdev.sc_dev.dv_xname, channel), 6061 DEBUG_PROBE); 6062 if (chp->ch_drive[0].UDMA_mode > 2) 6063 chp->ch_drive[0].UDMA_mode = 2; 6064 if (chp->ch_drive[1].UDMA_mode > 2) 6065 chp->ch_drive[1].UDMA_mode = 2; 6066 } 6067 /* Trim UDMA mode */ 6068 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6069 chp->ch_drive[0].UDMA_mode <= 2) || 6070 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6071 chp->ch_drive[1].UDMA_mode <= 2)) { 6072 if (chp->ch_drive[0].UDMA_mode > 2) 6073 chp->ch_drive[0].UDMA_mode = 2; 6074 if (chp->ch_drive[1].UDMA_mode > 2) 6075 chp->ch_drive[1].UDMA_mode = 2; 6076 } 6077 /* Set U66 if needed */ 6078 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6079 chp->ch_drive[0].UDMA_mode > 2) || 6080 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6081 chp->ch_drive[1].UDMA_mode > 2)) 6082 scr |= PDC262_U66_EN(channel); 6083 else 6084 scr &= ~PDC262_U66_EN(channel); 6085 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6086 PDC262_U66, scr); 6087 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 6088 sc->sc_wdcdev.sc_dev.dv_xname, channel, 6089 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6090 PDC262_ATAPI(channel))), DEBUG_PROBE); 6091 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 6092 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 6093 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6094 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6095 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 6096 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6097 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6098 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 6099 atapi = 0; 6100 else 6101 atapi = PDC262_ATAPI_UDMA; 6102 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6103 PDC262_ATAPI(channel), atapi); 6104 } 6105 } 6106 for (drive = 0; drive < 2; drive++) { 6107 drvp = &chp->ch_drive[drive]; 6108 /* If no drive, skip */ 6109 if ((drvp->drive_flags & 
DRIVE) == 0) 6110 continue; 6111 mode = 0; 6112 if (drvp->drive_flags & DRIVE_UDMA) { 6113 /* use Ultra/DMA */ 6114 drvp->drive_flags &= ~DRIVE_DMA; 6115 mode = PDC2xx_TIM_SET_MB(mode, 6116 pdc2xx_udma_mb[drvp->UDMA_mode]); 6117 mode = PDC2xx_TIM_SET_MC(mode, 6118 pdc2xx_udma_mc[drvp->UDMA_mode]); 6119 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6120 } else if (drvp->drive_flags & DRIVE_DMA) { 6121 mode = PDC2xx_TIM_SET_MB(mode, 6122 pdc2xx_dma_mb[drvp->DMA_mode]); 6123 mode = PDC2xx_TIM_SET_MC(mode, 6124 pdc2xx_dma_mc[drvp->DMA_mode]); 6125 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6126 } else { 6127 mode = PDC2xx_TIM_SET_MB(mode, 6128 pdc2xx_dma_mb[0]); 6129 mode = PDC2xx_TIM_SET_MC(mode, 6130 pdc2xx_dma_mc[0]); 6131 } 6132 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 6133 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 6134 if (drvp->drive_flags & DRIVE_ATA) 6135 mode |= PDC2xx_TIM_PRE; 6136 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 6137 if (drvp->PIO_mode >= 3) { 6138 mode |= PDC2xx_TIM_IORDY; 6139 if (drive == 0) 6140 mode |= PDC2xx_TIM_IORDYp; 6141 } 6142 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 6143 "timings 0x%x\n", 6144 sc->sc_wdcdev.sc_dev.dv_xname, 6145 chp->channel, drive, mode), DEBUG_PROBE); 6146 pci_conf_write(sc->sc_pc, sc->sc_tag, 6147 PDC2xx_TIM(chp->channel, drive), mode); 6148 } 6149 if (idedma_ctl != 0) { 6150 /* Add software bits in status register */ 6151 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6152 IDEDMA_CTL(channel), idedma_ctl); 6153 } 6154 pciide_print_modes(cp); 6155 } 6156 6157 void 6158 pdc20268_setup_channel(struct channel_softc *chp) 6159 { 6160 struct ata_drive_datas *drvp; 6161 int drive, cable; 6162 u_int32_t idedma_ctl; 6163 struct pciide_channel *cp = (struct pciide_channel *)chp; 6164 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6165 int channel = chp->channel; 6166 6167 /* check 80 pins cable */ 6168 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE; 6169 6170 /* setup DMA if needed */ 6171 pciide_channel_dma_setup(cp); 6172 6173 idedma_ctl = 0; 6174 6175 for (drive = 0; drive < 2; drive++) { 6176 drvp = &chp->ch_drive[drive]; 6177 /* If no drive, skip */ 6178 if ((drvp->drive_flags & DRIVE) == 0) 6179 continue; 6180 if (drvp->drive_flags & DRIVE_UDMA) { 6181 /* use Ultra/DMA */ 6182 drvp->drive_flags &= ~DRIVE_DMA; 6183 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6184 if (cable && drvp->UDMA_mode > 2) { 6185 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6186 "cable not detected\n", drvp->drive_name, 6187 sc->sc_wdcdev.sc_dev.dv_xname, 6188 channel, drive), DEBUG_PROBE); 6189 drvp->UDMA_mode = 2; 6190 } 6191 } else if (drvp->drive_flags & DRIVE_DMA) { 6192 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6193 } 6194 } 6195 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */ 6196 if (idedma_ctl != 0) { 6197 /* Add software bits in status register */ 6198 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6199 IDEDMA_CTL(channel), idedma_ctl); 6200 } 6201 pciide_print_modes(cp); 6202 } 6203 6204 int 6205 pdc202xx_pci_intr(void *arg) 6206 { 6207 struct pciide_softc *sc = arg; 6208 struct pciide_channel *cp; 6209 struct channel_softc *wdc_cp; 6210 int i, rv, crv; 6211 u_int32_t scr; 6212 6213 rv = 0; 6214 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 6215 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6216 cp = &sc->pciide_channels[i]; 6217 wdc_cp = &cp->wdc_channel; 6218 /* If a compat channel skip. 
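Compat channels are serviced by the legacy interrupt established by pciide_map_compat_intr(), not by this native-PCI handler.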
*/ 6219 if (cp->compat) 6220 continue; 6221 if (scr & PDC2xx_SCR_INT(i)) { 6222 crv = wdcintr(wdc_cp); 6223 if (crv == 0) 6224 printf("%s:%d: bogus intr (reg 0x%x)\n", 6225 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 6226 else 6227 rv = 1; 6228 } 6229 } 6230 return (rv); 6231 } 6232 6233 int 6234 pdc20265_pci_intr(void *arg) 6235 { 6236 struct pciide_softc *sc = arg; 6237 struct pciide_channel *cp; 6238 struct channel_softc *wdc_cp; 6239 int i, rv, crv; 6240 u_int32_t dmastat; 6241 6242 rv = 0; 6243 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6244 cp = &sc->pciide_channels[i]; 6245 wdc_cp = &cp->wdc_channel; 6246 /* If a compat channel skip. */ 6247 if (cp->compat) 6248 continue; 6249 6250 /* 6251 * In case of shared IRQ check that the interrupt 6252 * was actually generated by this channel. 6253 * Only check the channel that is enabled. 6254 */ 6255 if (cp->hw_ok && PDC_IS_268(sc)) { 6256 if ((pdc268_config_read(wdc_cp, 6257 0x0b) & PDC268_INTR) == 0) 6258 continue; 6259 } 6260 6261 /* 6262 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 6263 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 6264 * So use it instead (requires 2 reg reads instead of 1, 6265 * but we can't do it another way). 6266 */ 6267 dmastat = bus_space_read_1(sc->sc_dma_iot, 6268 sc->sc_dma_ioh, IDEDMA_CTL(i)); 6269 if ((dmastat & IDEDMA_CTL_INTR) == 0) 6270 continue; 6271 6272 crv = wdcintr(wdc_cp); 6273 if (crv == 0) 6274 printf("%s:%d: bogus intr\n", 6275 sc->sc_wdcdev.sc_dev.dv_xname, i); 6276 else 6277 rv = 1; 6278 } 6279 return (rv); 6280 } 6281 6282 void 6283 pdc20262_dma_start(void *v, int channel, int drive) 6284 { 6285 struct pciide_softc *sc = v; 6286 struct pciide_dma_maps *dma_maps = 6287 &sc->pciide_channels[channel].dma_maps[drive]; 6288 u_int8_t clock; 6289 u_int32_t count; 6290 6291 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6292 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6293 PDC262_U66); 6294 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6295 PDC262_U66, clock | PDC262_U66_EN(channel)); 6296 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 6297 count |= dma_maps->dma_flags & WDC_DMA_READ ? 
6298 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 6299 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6300 PDC262_ATAPI(channel), count); 6301 } 6302 6303 pciide_dma_start(v, channel, drive); 6304 } 6305 6306 int 6307 pdc20262_dma_finish(void *v, int channel, int drive, int force) 6308 { 6309 struct pciide_softc *sc = v; 6310 struct pciide_dma_maps *dma_maps = 6311 &sc->pciide_channels[channel].dma_maps[drive]; 6312 u_int8_t clock; 6313 6314 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6315 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6316 PDC262_U66); 6317 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6318 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 6319 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6320 PDC262_ATAPI(channel), 0); 6321 } 6322 6323 return (pciide_dma_finish(v, channel, drive, force)); 6324 } 6325 6326 void 6327 pdcsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6328 { 6329 struct pciide_channel *cp; 6330 struct channel_softc *wdc_cp; 6331 struct pciide_pdcsata *ps; 6332 int channel, i; 6333 bus_size_t dmasize; 6334 pci_intr_handle_t intrhandle; 6335 const char *intrstr; 6336 6337 /* Allocate memory for private data */ 6338 sc->sc_cookie = malloc(sizeof(*ps), M_DEVBUF, M_NOWAIT | M_ZERO); 6339 ps = sc->sc_cookie; 6340 6341 /* 6342 * Promise SATA controllers have 3 or 4 channels, 6343 * the usual IDE registers are mapped in I/O space, with offsets. 6344 */ 6345 if (pci_intr_map(pa, &intrhandle) != 0) { 6346 printf(": couldn't map interrupt\n"); 6347 return; 6348 } 6349 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 6350 6351 switch (sc->sc_pp->ide_product) { 6352 case PCI_PRODUCT_PROMISE_PDC20318: 6353 case PCI_PRODUCT_PROMISE_PDC20319: 6354 case PCI_PRODUCT_PROMISE_PDC20371: 6355 case PCI_PRODUCT_PROMISE_PDC20375: 6356 case PCI_PRODUCT_PROMISE_PDC20376: 6357 case PCI_PRODUCT_PROMISE_PDC20377: 6358 case PCI_PRODUCT_PROMISE_PDC20378: 6359 case PCI_PRODUCT_PROMISE_PDC20379: 6360 default: 6361 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6362 intrhandle, IPL_BIO, pdc203xx_pci_intr, sc, 6363 sc->sc_wdcdev.sc_dev.dv_xname); 6364 break; 6365 6366 case PCI_PRODUCT_PROMISE_PDC40518: 6367 case PCI_PRODUCT_PROMISE_PDC40519: 6368 case PCI_PRODUCT_PROMISE_PDC40718: 6369 case PCI_PRODUCT_PROMISE_PDC40719: 6370 case PCI_PRODUCT_PROMISE_PDC40779: 6371 case PCI_PRODUCT_PROMISE_PDC20571: 6372 case PCI_PRODUCT_PROMISE_PDC20575: 6373 case PCI_PRODUCT_PROMISE_PDC20579: 6374 case PCI_PRODUCT_PROMISE_PDC20771: 6375 case PCI_PRODUCT_PROMISE_PDC20775: 6376 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6377 intrhandle, IPL_BIO, pdc205xx_pci_intr, sc, 6378 sc->sc_wdcdev.sc_dev.dv_xname); 6379 break; 6380 } 6381 6382 if (sc->sc_pci_ih == NULL) { 6383 printf(": couldn't establish native-PCI interrupt"); 6384 if (intrstr != NULL) 6385 printf(" at %s", intrstr); 6386 printf("\n"); 6387 return; 6388 } 6389 6390 sc->sc_dma_ok = (pci_mapreg_map(pa, PCIIDE_REG_BUS_MASTER_DMA, 6391 PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_dma_iot, 6392 &sc->sc_dma_ioh, NULL, &dmasize, 0) == 0); 6393 if (!sc->sc_dma_ok) { 6394 printf(": couldn't map bus-master DMA registers\n"); 6395 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6396 return; 6397 } 6398 6399 sc->sc_dmat = pa->pa_dmat; 6400 6401 if (pci_mapreg_map(pa, PDC203xx_BAR_IDEREGS, 6402 PCI_MAPREG_MEM_TYPE_32BIT, 0, &ps->ba5_st, 6403 &ps->ba5_sh, NULL, NULL, 0) != 0) { 6404 printf(": couldn't map IDE registers\n"); 6405 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, dmasize); 6406 pci_intr_disestablish(pa->pa_pc, 
sc->sc_pci_ih); 6407 return; 6408 } 6409 6410 printf(": DMA\n"); 6411 6412 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 6413 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6414 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6415 sc->sc_wdcdev.irqack = pdc203xx_irqack; 6416 sc->sc_wdcdev.PIO_cap = 4; 6417 sc->sc_wdcdev.DMA_cap = 2; 6418 sc->sc_wdcdev.UDMA_cap = 6; 6419 sc->sc_wdcdev.set_modes = pdc203xx_setup_channel; 6420 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6421 6422 switch (sc->sc_pp->ide_product) { 6423 case PCI_PRODUCT_PROMISE_PDC20318: 6424 case PCI_PRODUCT_PROMISE_PDC20319: 6425 case PCI_PRODUCT_PROMISE_PDC20371: 6426 case PCI_PRODUCT_PROMISE_PDC20375: 6427 case PCI_PRODUCT_PROMISE_PDC20376: 6428 case PCI_PRODUCT_PROMISE_PDC20377: 6429 case PCI_PRODUCT_PROMISE_PDC20378: 6430 case PCI_PRODUCT_PROMISE_PDC20379: 6431 default: 6432 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x06c, 0x00ff0033); 6433 sc->sc_wdcdev.nchannels = 6434 (bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x48) & 0x02) ? 6435 PDC203xx_NCHANNELS : 3; 6436 break; 6437 6438 case PCI_PRODUCT_PROMISE_PDC40518: 6439 case PCI_PRODUCT_PROMISE_PDC40519: 6440 case PCI_PRODUCT_PROMISE_PDC40718: 6441 case PCI_PRODUCT_PROMISE_PDC40719: 6442 case PCI_PRODUCT_PROMISE_PDC40779: 6443 case PCI_PRODUCT_PROMISE_PDC20571: 6444 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6445 sc->sc_wdcdev.nchannels = PDC40718_NCHANNELS; 6446 6447 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6448 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6449 6450 break; 6451 case PCI_PRODUCT_PROMISE_PDC20575: 6452 case PCI_PRODUCT_PROMISE_PDC20579: 6453 case PCI_PRODUCT_PROMISE_PDC20771: 6454 case PCI_PRODUCT_PROMISE_PDC20775: 6455 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6456 sc->sc_wdcdev.nchannels = PDC20575_NCHANNELS; 6457 6458 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6459 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6460 6461 break; 6462 } 6463 6464 sc->sc_wdcdev.dma_arg = sc; 6465 sc->sc_wdcdev.dma_init = pciide_dma_init; 6466 sc->sc_wdcdev.dma_start = pdc203xx_dma_start; 6467 sc->sc_wdcdev.dma_finish = pdc203xx_dma_finish; 6468 6469 for (channel = 0; channel < sc->sc_wdcdev.nchannels; 6470 channel++) { 6471 cp = &sc->pciide_channels[channel]; 6472 sc->wdc_chanarray[channel] = &cp->wdc_channel; 6473 6474 cp->ih = sc->sc_pci_ih; 6475 cp->name = NULL; 6476 cp->wdc_channel.channel = channel; 6477 cp->wdc_channel.wdc = &sc->sc_wdcdev; 6478 cp->wdc_channel.ch_queue = 6479 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 6480 if (cp->wdc_channel.ch_queue == NULL) { 6481 printf("%s: channel %d: " 6482 "can't allocate memory for command queue\n", 6483 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6484 continue; 6485 } 6486 wdc_cp = &cp->wdc_channel; 6487 6488 ps->regs[channel].ctl_iot = ps->ba5_st; 6489 ps->regs[channel].cmd_iot = ps->ba5_st; 6490 6491 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6492 0x0238 + (channel << 7), 1, 6493 &ps->regs[channel].ctl_ioh) != 0) { 6494 printf("%s: couldn't map channel %d ctl regs\n", 6495 sc->sc_wdcdev.sc_dev.dv_xname, 6496 channel); 6497 continue; 6498 } 6499 for (i = 0; i < WDC_NREG; i++) { 6500 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6501 0x0200 + (i << 2) + (channel << 7), i == 0 ? 
4 : 1, 6502 &ps->regs[channel].cmd_iohs[i]) != 0) { 6503 printf("%s: couldn't map channel %d cmd " 6504 "regs\n", 6505 sc->sc_wdcdev.sc_dev.dv_xname, 6506 channel); 6507 continue; 6508 } 6509 } 6510 ps->regs[channel].cmd_iohs[wdr_status & _WDC_REGMASK] = 6511 ps->regs[channel].cmd_iohs[wdr_command & _WDC_REGMASK]; 6512 ps->regs[channel].cmd_iohs[wdr_features & _WDC_REGMASK] = 6513 ps->regs[channel].cmd_iohs[wdr_error & _WDC_REGMASK]; 6514 wdc_cp->data32iot = wdc_cp->cmd_iot = 6515 ps->regs[channel].cmd_iot; 6516 wdc_cp->data32ioh = wdc_cp->cmd_ioh = 6517 ps->regs[channel].cmd_iohs[0]; 6518 wdc_cp->_vtbl = &wdc_pdc203xx_vtbl; 6519 6520 /* 6521 * Subregion the busmaster registers. They're spread all over 6522 * the controller's register space :(. They are also 4 bytes 6523 * wide, with some specific extensions in the extra bits. 6524 * It also seems that the IDEDMA_CTL register isn't available. 6525 */ 6526 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6527 0x260 + (channel << 7), 1, 6528 &ps->regs[channel].dma_iohs[IDEDMA_CMD(0)]) != 0) { 6529 printf("%s channel %d: can't subregion DMA " 6530 "registers\n", 6531 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6532 continue; 6533 } 6534 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6535 0x244 + (channel << 7), 4, 6536 &ps->regs[channel].dma_iohs[IDEDMA_TBL(0)]) != 0) { 6537 printf("%s channel %d: can't subregion DMA " 6538 "registers\n", 6539 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6540 continue; 6541 } 6542 6543 wdcattach(wdc_cp); 6544 bus_space_write_4(sc->sc_dma_iot, 6545 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6546 (bus_space_read_4(sc->sc_dma_iot, 6547 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6548 0) & ~0x00003f9f) | (channel + 1)); 6549 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 6550 (channel + 1) << 2, 0x00000001); 6551 6552 pdc203xx_setup_channel(&cp->wdc_channel); 6553 } 6554 6555 printf("%s: using %s for native-PCI interrupt\n", 6556 sc->sc_wdcdev.sc_dev.dv_xname, 6557 intrstr ?
intrstr : "unknown interrupt"); 6558 } 6559 6560 void 6561 pdc203xx_setup_channel(struct channel_softc *chp) 6562 { 6563 struct ata_drive_datas *drvp; 6564 struct pciide_channel *cp = (struct pciide_channel *)chp; 6565 int drive, s; 6566 6567 pciide_channel_dma_setup(cp); 6568 6569 for (drive = 0; drive < 2; drive++) { 6570 drvp = &chp->ch_drive[drive]; 6571 if ((drvp->drive_flags & DRIVE) == 0) 6572 continue; 6573 if (drvp->drive_flags & DRIVE_UDMA) { 6574 s = splbio(); 6575 drvp->drive_flags &= ~DRIVE_DMA; 6576 splx(s); 6577 } 6578 } 6579 pciide_print_modes(cp); 6580 } 6581 6582 int 6583 pdc203xx_pci_intr(void *arg) 6584 { 6585 struct pciide_softc *sc = arg; 6586 struct pciide_channel *cp; 6587 struct channel_softc *wdc_cp; 6588 struct pciide_pdcsata *ps = sc->sc_cookie; 6589 int i, rv, crv; 6590 u_int32_t scr; 6591 6592 rv = 0; 6593 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x00040); 6594 6595 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6596 cp = &sc->pciide_channels[i]; 6597 wdc_cp = &cp->wdc_channel; 6598 if (scr & (1 << (i + 1))) { 6599 crv = wdcintr(wdc_cp); 6600 if (crv == 0) { 6601 printf("%s:%d: bogus intr (reg 0x%x)\n", 6602 sc->sc_wdcdev.sc_dev.dv_xname, 6603 i, scr); 6604 } else 6605 rv = 1; 6606 } 6607 } 6608 6609 return (rv); 6610 } 6611 6612 int 6613 pdc205xx_pci_intr(void *arg) 6614 { 6615 struct pciide_softc *sc = arg; 6616 struct pciide_channel *cp; 6617 struct channel_softc *wdc_cp; 6618 struct pciide_pdcsata *ps = sc->sc_cookie; 6619 int i, rv, crv; 6620 u_int32_t scr, status; 6621 6622 rv = 0; 6623 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x40); 6624 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x40, scr & 0x0000ffff); 6625 6626 status = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x60); 6627 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, status & 0x000000ff); 6628 6629 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6630 cp = &sc->pciide_channels[i]; 6631 wdc_cp = &cp->wdc_channel; 6632 if (scr & (1 << (i + 1))) { 6633 crv = wdcintr(wdc_cp); 6634 if (crv == 0) { 6635 printf("%s:%d: bogus intr (reg 0x%x)\n", 6636 sc->sc_wdcdev.sc_dev.dv_xname, 6637 i, scr); 6638 } else 6639 rv = 1; 6640 } 6641 } 6642 return rv; 6643 } 6644 6645 void 6646 pdc203xx_irqack(struct channel_softc *chp) 6647 { 6648 struct pciide_channel *cp = (struct pciide_channel *)chp; 6649 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6650 struct pciide_pdcsata *ps = sc->sc_cookie; 6651 int chan = chp->channel; 6652 6653 bus_space_write_4(sc->sc_dma_iot, 6654 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, 6655 (bus_space_read_4(sc->sc_dma_iot, 6656 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 6657 0) & ~0x00003f9f) | (chan + 1)); 6658 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 6659 (chan + 1) << 2, 0x00000001); 6660 } 6661 6662 void 6663 pdc203xx_dma_start(void *v, int channel, int drive) 6664 { 6665 struct pciide_softc *sc = v; 6666 struct pciide_channel *cp = &sc->pciide_channels[channel]; 6667 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 6668 struct pciide_pdcsata *ps = sc->sc_cookie; 6669 6670 /* Write table address */ 6671 bus_space_write_4(sc->sc_dma_iot, 6672 ps->regs[channel].dma_iohs[IDEDMA_TBL(0)], 0, 6673 dma_maps->dmamap_table->dm_segs[0].ds_addr); 6674 6675 /* Start DMA engine */ 6676 bus_space_write_4(sc->sc_dma_iot, 6677 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6678 (bus_space_read_4(sc->sc_dma_iot, 6679 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6680 0) & ~0xc0) | ((dma_maps->dma_flags & WDC_DMA_READ) ? 
0x80 : 0xc0)); 6681 } 6682 6683 int 6684 pdc203xx_dma_finish(void *v, int channel, int drive, int force) 6685 { 6686 struct pciide_softc *sc = v; 6687 struct pciide_channel *cp = &sc->pciide_channels[channel]; 6688 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 6689 struct pciide_pdcsata *ps = sc->sc_cookie; 6690 6691 /* Stop DMA channel */ 6692 bus_space_write_4(sc->sc_dma_iot, 6693 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6694 (bus_space_read_4(sc->sc_dma_iot, 6695 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6696 0) & ~0x80)); 6697 6698 /* Unload the map of the data buffer */ 6699 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 6700 dma_maps->dmamap_xfer->dm_mapsize, 6701 (dma_maps->dma_flags & WDC_DMA_READ) ? 6702 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 6703 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 6704 6705 return (0); 6706 } 6707 6708 u_int8_t 6709 pdc203xx_read_reg(struct channel_softc *chp, enum wdc_regs reg) 6710 { 6711 struct pciide_channel *cp = (struct pciide_channel *)chp; 6712 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6713 struct pciide_pdcsata *ps = sc->sc_cookie; 6714 u_int8_t val; 6715 6716 if (reg & _WDC_AUX) { 6717 return (bus_space_read_1(ps->regs[chp->channel].ctl_iot, 6718 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 6719 } else { 6720 val = bus_space_read_1(ps->regs[chp->channel].cmd_iot, 6721 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0); 6722 return (val); 6723 } 6724 } 6725 6726 void 6727 pdc203xx_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 6728 { 6729 struct pciide_channel *cp = (struct pciide_channel *)chp; 6730 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6731 struct pciide_pdcsata *ps = sc->sc_cookie; 6732 6733 if (reg & _WDC_AUX) 6734 bus_space_write_1(ps->regs[chp->channel].ctl_iot, 6735 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 6736 else 6737 bus_space_write_1(ps->regs[chp->channel].cmd_iot, 6738 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 6739 0, val); 6740 } 6741 6742 void 6743 pdc205xx_do_reset(struct channel_softc *chp) 6744 { 6745 struct pciide_channel *cp = (struct pciide_channel *)chp; 6746 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6747 struct pciide_pdcsata *ps = sc->sc_cookie; 6748 u_int32_t scontrol; 6749 6750 wdc_do_reset(chp); 6751 6752 /* reset SATA */ 6753 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 6754 SCONTROL_WRITE(ps, chp->channel, scontrol); 6755 delay(50*1000); 6756 6757 scontrol &= ~SControl_DET_INIT; 6758 SCONTROL_WRITE(ps, chp->channel, scontrol); 6759 delay(50*1000); 6760 } 6761 6762 void 6763 pdc205xx_drv_probe(struct channel_softc *chp) 6764 { 6765 struct pciide_channel *cp = (struct pciide_channel *)chp; 6766 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6767 struct pciide_pdcsata *ps = sc->sc_cookie; 6768 bus_space_handle_t *iohs; 6769 u_int32_t scontrol, sstatus; 6770 u_int16_t scnt, sn, cl, ch; 6771 int i, s; 6772 6773 /* XXX This should be done by other code. 
*/ 6774 for (i = 0; i < 2; i++) { 6775 chp->ch_drive[i].chnl_softc = chp; 6776 chp->ch_drive[i].drive = i; 6777 } 6778 6779 SCONTROL_WRITE(ps, chp->channel, 0); 6780 delay(50*1000); 6781 6782 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 6783 SCONTROL_WRITE(ps,chp->channel,scontrol); 6784 delay(50*1000); 6785 6786 scontrol &= ~SControl_DET_INIT; 6787 SCONTROL_WRITE(ps,chp->channel,scontrol); 6788 delay(50*1000); 6789 6790 sstatus = SSTATUS_READ(ps,chp->channel); 6791 6792 switch (sstatus & SStatus_DET_mask) { 6793 case SStatus_DET_NODEV: 6794 /* No Device; be silent. */ 6795 break; 6796 6797 case SStatus_DET_DEV_NE: 6798 printf("%s: port %d: device connected, but " 6799 "communication not established\n", 6800 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6801 break; 6802 6803 case SStatus_DET_OFFLINE: 6804 printf("%s: port %d: PHY offline\n", 6805 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6806 break; 6807 6808 case SStatus_DET_DEV: 6809 iohs = ps->regs[chp->channel].cmd_iohs; 6810 bus_space_write_1(chp->cmd_iot, iohs[wdr_sdh], 0, 6811 WDSD_IBM); 6812 delay(10); /* 400ns delay */ 6813 scnt = bus_space_read_2(chp->cmd_iot, iohs[wdr_seccnt], 0); 6814 sn = bus_space_read_2(chp->cmd_iot, iohs[wdr_sector], 0); 6815 cl = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_lo], 0); 6816 ch = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_hi], 0); 6817 #if 0 6818 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 6819 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 6820 scnt, sn, cl, ch); 6821 #endif 6822 /* 6823 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 6824 * cases we get wrong values here, so ignore it. 6825 */ 6826 s = splbio(); 6827 if (cl == 0x14 && ch == 0xeb) 6828 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 6829 else 6830 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 6831 splx(s); 6832 #if 0 6833 printf("%s: port %d: device present", 6834 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6835 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 6836 case 1: 6837 printf(", speed: 1.5Gb/s"); 6838 break; 6839 case 2: 6840 printf(", speed: 3.0Gb/s"); 6841 break; 6842 } 6843 printf("\n"); 6844 #endif 6845 break; 6846 6847 default: 6848 printf("%s: port %d: unknown SStatus: 0x%08x\n", 6849 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 6850 } 6851 } 6852 6853 #ifdef notyet 6854 /* 6855 * Inline functions for accessing the timing registers of the 6856 * OPTi controller. 6857 * 6858 * These *MUST* disable interrupts as they need atomic access to 6859 * certain magic registers. Failure to adhere to this *will* 6860 * break things in subtle ways if the wdc registers are accessed 6861 * by an interrupt routine while this magic sequence is executing. 
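 *
 * The sequence used by opti_read_config()/opti_write_config() below is:
 * two 16-bit reads of register #1 (wdr_features), an 8-bit write of 0x03
 * to register #2 (wdr_seccnt) to expose the configuration registers, the
 * actual access, and finally a write of 0x83 to register #2 to restore
 * the normal task-file registers.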
6862 */ 6863 static __inline__ u_int8_t 6864 opti_read_config(struct channel_softc *chp, int reg) 6865 { 6866 u_int8_t rv; 6867 int s = splhigh(); 6868 6869 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 6870 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 6871 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 6872 6873 /* Followed by an 8-bit write of 0x3 to register #2 */ 6874 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 6875 6876 /* Now we can read the required register */ 6877 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 6878 6879 /* Restore the real registers */ 6880 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 6881 6882 splx(s); 6883 6884 return (rv); 6885 } 6886 6887 static __inline__ void 6888 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 6889 { 6890 int s = splhigh(); 6891 6892 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 6893 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 6894 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 6895 6896 /* Followed by an 8-bit write of 0x3 to register #2 */ 6897 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 6898 6899 /* Now we can write the required register */ 6900 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 6901 6902 /* Restore the real registers */ 6903 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 6904 6905 splx(s); 6906 } 6907 6908 void 6909 opti_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6910 { 6911 struct pciide_channel *cp; 6912 bus_size_t cmdsize, ctlsize; 6913 pcireg_t interface; 6914 u_int8_t init_ctrl; 6915 int channel; 6916 6917 printf(": DMA"); 6918 /* 6919 * XXXSCW: 6920 * There seem to be a couple of buggy revisions/implementations 6921 * of the OPTi pciide chipset. This kludge seems to fix one of 6922 * the reported problems (NetBSD PR/11644) but still fails for the 6923 * other (NetBSD PR/13151), although the latter may be due to other 6924 * issues too... 
6925 */ 6926 if (sc->sc_rev <= 0x12) { 6927 printf(" (disabled)"); 6928 sc->sc_dma_ok = 0; 6929 sc->sc_wdcdev.cap = 0; 6930 } else { 6931 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32; 6932 pciide_mapreg_dma(sc, pa); 6933 } 6934 6935 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE; 6936 sc->sc_wdcdev.PIO_cap = 4; 6937 if (sc->sc_dma_ok) { 6938 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 6939 sc->sc_wdcdev.irqack = pciide_irqack; 6940 sc->sc_wdcdev.DMA_cap = 2; 6941 } 6942 sc->sc_wdcdev.set_modes = opti_setup_channel; 6943 6944 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6945 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 6946 6947 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 6948 OPTI_REG_INIT_CONTROL); 6949 6950 interface = PCI_INTERFACE(pa->pa_class); 6951 6952 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 6953 6954 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6955 cp = &sc->pciide_channels[channel]; 6956 if (pciide_chansetup(sc, channel, interface) == 0) 6957 continue; 6958 if (channel == 1 && 6959 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 6960 printf("%s: %s ignored (disabled)\n", 6961 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6962 continue; 6963 } 6964 pciide_map_compat_intr(pa, cp, channel, interface); 6965 if (cp->hw_ok == 0) 6966 continue; 6967 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6968 pciide_pci_intr); 6969 if (cp->hw_ok == 0) { 6970 pciide_unmap_compat_intr(pa, cp, channel, interface); 6971 continue; 6972 } 6973 opti_setup_channel(&cp->wdc_channel); 6974 } 6975 } 6976 6977 void 6978 opti_setup_channel(struct channel_softc *chp) 6979 { 6980 struct ata_drive_datas *drvp; 6981 struct pciide_channel *cp = (struct pciide_channel *)chp; 6982 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6983 int drive, spd; 6984 int mode[2]; 6985 u_int8_t rv, mr; 6986 6987 /* 6988 * The `Delay' and `Address Setup Time' fields of the 6989 * Miscellaneous Register are always zero initially. 6990 */ 6991 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 6992 mr &= ~(OPTI_MISC_DELAY_MASK | 6993 OPTI_MISC_ADDR_SETUP_MASK | 6994 OPTI_MISC_INDEX_MASK); 6995 6996 /* Prime the control register before setting timing values */ 6997 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 6998 6999 /* Determine the clockrate of the PCIbus the chip is attached to */ 7000 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 7001 spd &= OPTI_STRAP_PCI_SPEED_MASK; 7002 7003 /* setup DMA if needed */ 7004 pciide_channel_dma_setup(cp); 7005 7006 for (drive = 0; drive < 2; drive++) { 7007 drvp = &chp->ch_drive[drive]; 7008 /* If no drive, skip */ 7009 if ((drvp->drive_flags & DRIVE) == 0) { 7010 mode[drive] = -1; 7011 continue; 7012 } 7013 7014 if ((drvp->drive_flags & DRIVE_DMA)) { 7015 /* 7016 * Timings will be used for both PIO and DMA, 7017 * so adjust DMA mode if needed 7018 */ 7019 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 7020 drvp->PIO_mode = drvp->DMA_mode + 2; 7021 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 7022 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 7023 drvp->PIO_mode - 2 : 0; 7024 if (drvp->DMA_mode == 0) 7025 drvp->PIO_mode = 0; 7026 7027 mode[drive] = drvp->DMA_mode + 5; 7028 } else 7029 mode[drive] = drvp->PIO_mode; 7030 7031 if (drive && mode[0] >= 0 && 7032 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 7033 /* 7034 * Can't have two drives using different values 7035 * for `Address Setup Time'. 7036 * Slow down the faster drive to compensate. 
7037 */ 7038 int d = (opti_tim_as[spd][mode[0]] > 7039 opti_tim_as[spd][mode[1]]) ? 0 : 1; 7040 7041 mode[d] = mode[1-d]; 7042 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 7043 chp->ch_drive[d].DMA_mode = 0; 7044 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA; 7045 } 7046 } 7047 7048 for (drive = 0; drive < 2; drive++) { 7049 int m; 7050 if ((m = mode[drive]) < 0) 7051 continue; 7052 7053 /* Set the Address Setup Time and select appropriate index */ 7054 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 7055 rv |= OPTI_MISC_INDEX(drive); 7056 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 7057 7058 /* Set the pulse width and recovery timing parameters */ 7059 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 7060 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 7061 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 7062 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 7063 7064 /* Set the Enhanced Mode register appropriately */ 7065 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 7066 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 7067 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 7068 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 7069 } 7070 7071 /* Finally, enable the timings */ 7072 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 7073 7074 pciide_print_modes(cp); 7075 } 7076 #endif 7077 7078 void 7079 serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7080 { 7081 struct pciide_channel *cp; 7082 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 7083 pcitag_t pcib_tag; 7084 int channel; 7085 bus_size_t cmdsize, ctlsize; 7086 7087 printf(": DMA"); 7088 pciide_mapreg_dma(sc, pa); 7089 printf("\n"); 7090 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7091 WDC_CAPABILITY_MODE; 7092 7093 if (sc->sc_dma_ok) { 7094 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7095 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7096 sc->sc_wdcdev.irqack = pciide_irqack; 7097 } 7098 sc->sc_wdcdev.PIO_cap = 4; 7099 sc->sc_wdcdev.DMA_cap = 2; 7100 switch (sc->sc_pp->ide_product) { 7101 case PCI_PRODUCT_RCC_OSB4_IDE: 7102 sc->sc_wdcdev.UDMA_cap = 2; 7103 break; 7104 case PCI_PRODUCT_RCC_CSB5_IDE: 7105 if (sc->sc_rev < 0x92) 7106 sc->sc_wdcdev.UDMA_cap = 4; 7107 else 7108 sc->sc_wdcdev.UDMA_cap = 5; 7109 break; 7110 case PCI_PRODUCT_RCC_CSB6_IDE: 7111 sc->sc_wdcdev.UDMA_cap = 4; 7112 break; 7113 case PCI_PRODUCT_RCC_CSB6_RAID_IDE: 7114 sc->sc_wdcdev.UDMA_cap = 5; 7115 break; 7116 } 7117 7118 sc->sc_wdcdev.set_modes = serverworks_setup_channel; 7119 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7120 sc->sc_wdcdev.nchannels = 7121 (sc->sc_pp->ide_product == PCI_PRODUCT_RCC_CSB6_IDE ?
1 : 2); 7122 7123 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7124 cp = &sc->pciide_channels[channel]; 7125 if (pciide_chansetup(sc, channel, interface) == 0) 7126 continue; 7127 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7128 serverworks_pci_intr); 7129 if (cp->hw_ok == 0) 7130 return; 7131 pciide_map_compat_intr(pa, cp, channel, interface); 7132 if (cp->hw_ok == 0) 7133 return; 7134 serverworks_setup_channel(&cp->wdc_channel); 7135 } 7136 7137 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 7138 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 7139 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 7140 } 7141 7142 void 7143 serverworks_setup_channel(struct channel_softc *chp) 7144 { 7145 struct ata_drive_datas *drvp; 7146 struct pciide_channel *cp = (struct pciide_channel *)chp; 7147 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7148 int channel = chp->channel; 7149 int drive, unit; 7150 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 7151 u_int32_t idedma_ctl; 7152 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 7153 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 7154 7155 /* setup DMA if needed */ 7156 pciide_channel_dma_setup(cp); 7157 7158 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 7159 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 7160 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 7161 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 7162 7163 pio_time &= ~(0xffff << (16 * channel)); 7164 dma_time &= ~(0xffff << (16 * channel)); 7165 pio_mode &= ~(0xff << (8 * channel + 16)); 7166 udma_mode &= ~(0xff << (8 * channel + 16)); 7167 udma_mode &= ~(3 << (2 * channel)); 7168 7169 idedma_ctl = 0; 7170 7171 /* Per drive settings */ 7172 for (drive = 0; drive < 2; drive++) { 7173 drvp = &chp->ch_drive[drive]; 7174 /* If no drive, skip */ 7175 if ((drvp->drive_flags & DRIVE) == 0) 7176 continue; 7177 unit = drive + 2 * channel; 7178 /* add timing values, setup DMA if needed */ 7179 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 7180 pio_mode |= drvp->PIO_mode << (4 * unit + 16); 7181 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7182 (drvp->drive_flags & DRIVE_UDMA)) { 7183 /* use Ultra/DMA, check for 80-pin cable */ 7184 if (sc->sc_rev <= 0x92 && drvp->UDMA_mode > 2 && 7185 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 7186 PCI_SUBSYS_ID_REG)) & 7187 (1 << (14 + channel))) == 0) { 7188 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 7189 "cable not detected\n", drvp->drive_name, 7190 sc->sc_wdcdev.sc_dev.dv_xname, 7191 channel, drive), DEBUG_PROBE); 7192 drvp->UDMA_mode = 2; 7193 } 7194 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7195 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 7196 udma_mode |= 1 << unit; 7197 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7198 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7199 (drvp->drive_flags & DRIVE_DMA)) { 7200 /* use Multiword DMA */ 7201 drvp->drive_flags &= ~DRIVE_UDMA; 7202 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7203 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7204 } else { 7205 /* PIO only */ 7206 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7207 } 7208 } 7209 7210 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 7211 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 7212 if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE) 7213 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 7214 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 
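	/*
	 * Note on the layout used above (inferred from the shifts in this
	 * function, not from ServerWorks documentation): registers 0x40 and
	 * 0x44 hold one timing byte per drive at byte offset (unit^1), while
	 * 0x48 and 0x54 hold one mode nibble per drive starting at bit 16,
	 * with the per-drive UDMA enable bits in the low bits of 0x54.  For
	 * example, channel 1 / drive 0 is unit 2, so its PIO timing byte
	 * lands in bits 31:24 of register 0x40, its mode nibbles in bits
	 * 27:24 of 0x48 and 0x54, and its UDMA enable bit is bit 2 of 0x54.
	 */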
7215 7216 if (idedma_ctl != 0) { 7217 /* Add software bits in status register */ 7218 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7219 IDEDMA_CTL(channel), idedma_ctl); 7220 } 7221 pciide_print_modes(cp); 7222 } 7223 7224 int 7225 serverworks_pci_intr(void *arg) 7226 { 7227 struct pciide_softc *sc = arg; 7228 struct pciide_channel *cp; 7229 struct channel_softc *wdc_cp; 7230 int rv = 0; 7231 int dmastat, i, crv; 7232 7233 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7234 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7235 IDEDMA_CTL(i)); 7236 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 7237 IDEDMA_CTL_INTR) 7238 continue; 7239 cp = &sc->pciide_channels[i]; 7240 wdc_cp = &cp->wdc_channel; 7241 crv = wdcintr(wdc_cp); 7242 if (crv == 0) { 7243 printf("%s:%d: bogus intr\n", 7244 sc->sc_wdcdev.sc_dev.dv_xname, i); 7245 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7246 IDEDMA_CTL(i), dmastat); 7247 } else 7248 rv = 1; 7249 } 7250 return (rv); 7251 } 7252 7253 void 7254 svwsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7255 { 7256 struct pciide_channel *cp; 7257 pci_intr_handle_t intrhandle; 7258 const char *intrstr; 7259 int channel; 7260 struct pciide_svwsata *ss; 7261 7262 /* Allocate memory for private data */ 7263 sc->sc_cookie = malloc(sizeof(*ss), M_DEVBUF, M_NOWAIT | M_ZERO); 7264 ss = sc->sc_cookie; 7265 7266 /* The 4-port version has a dummy second function. */ 7267 if (pci_conf_read(sc->sc_pc, sc->sc_tag, 7268 PCI_MAPREG_START + 0x14) == 0) { 7269 printf("\n"); 7270 return; 7271 } 7272 7273 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 7274 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 7275 &ss->ba5_st, &ss->ba5_sh, NULL, NULL, 0) != 0) { 7276 printf(": unable to map BA5 register space\n"); 7277 return; 7278 } 7279 7280 printf(": DMA"); 7281 svwsata_mapreg_dma(sc, pa); 7282 printf("\n"); 7283 7284 if (sc->sc_dma_ok) { 7285 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 7286 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7287 sc->sc_wdcdev.irqack = pciide_irqack; 7288 } 7289 sc->sc_wdcdev.PIO_cap = 4; 7290 sc->sc_wdcdev.DMA_cap = 2; 7291 sc->sc_wdcdev.UDMA_cap = 6; 7292 7293 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7294 sc->sc_wdcdev.nchannels = 4; 7295 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7296 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7297 sc->sc_wdcdev.set_modes = sata_setup_channel; 7298 7299 /* We can use SControl and SStatus to probe for drives. */ 7300 sc->sc_wdcdev.drv_probe = svwsata_drv_probe; 7301 7302 /* Map and establish the interrupt handler. */ 7303 if(pci_intr_map(pa, &intrhandle) != 0) { 7304 printf("%s: couldn't map native-PCI interrupt\n", 7305 sc->sc_wdcdev.sc_dev.dv_xname); 7306 return; 7307 } 7308 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 7309 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 7310 pciide_pci_intr, sc, sc->sc_wdcdev.sc_dev.dv_xname); 7311 if (sc->sc_pci_ih != NULL) { 7312 printf("%s: using %s for native-PCI interrupt\n", 7313 sc->sc_wdcdev.sc_dev.dv_xname, 7314 intrstr ? 
intrstr : "unknown interrupt"); 7315 } else { 7316 printf("%s: couldn't establish native-PCI interrupt", 7317 sc->sc_wdcdev.sc_dev.dv_xname); 7318 if (intrstr != NULL) 7319 printf(" at %s", intrstr); 7320 printf("\n"); 7321 return; 7322 } 7323 7324 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7325 cp = &sc->pciide_channels[channel]; 7326 if (pciide_chansetup(sc, channel, 0) == 0) 7327 continue; 7328 svwsata_mapchan(cp); 7329 sata_setup_channel(&cp->wdc_channel); 7330 } 7331 } 7332 7333 void 7334 svwsata_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 7335 { 7336 struct pciide_svwsata *ss = sc->sc_cookie; 7337 7338 sc->sc_wdcdev.dma_arg = sc; 7339 sc->sc_wdcdev.dma_init = pciide_dma_init; 7340 sc->sc_wdcdev.dma_start = pciide_dma_start; 7341 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 7342 7343 /* XXX */ 7344 sc->sc_dma_iot = ss->ba5_st; 7345 sc->sc_dma_ioh = ss->ba5_sh; 7346 7347 sc->sc_dmacmd_read = svwsata_dmacmd_read; 7348 sc->sc_dmacmd_write = svwsata_dmacmd_write; 7349 sc->sc_dmactl_read = svwsata_dmactl_read; 7350 sc->sc_dmactl_write = svwsata_dmactl_write; 7351 sc->sc_dmatbl_write = svwsata_dmatbl_write; 7352 7353 /* DMA registers all set up! */ 7354 sc->sc_dmat = pa->pa_dmat; 7355 sc->sc_dma_ok = 1; 7356 } 7357 7358 u_int8_t 7359 svwsata_dmacmd_read(struct pciide_softc *sc, int chan) 7360 { 7361 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7362 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0))); 7363 } 7364 7365 void 7366 svwsata_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 7367 { 7368 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7369 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0), val); 7370 } 7371 7372 u_int8_t 7373 svwsata_dmactl_read(struct pciide_softc *sc, int chan) 7374 { 7375 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7376 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0))); 7377 } 7378 7379 void 7380 svwsata_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 7381 { 7382 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7383 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0), val); 7384 } 7385 7386 void 7387 svwsata_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 7388 { 7389 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 7390 (chan << 8) + SVWSATA_DMA + IDEDMA_TBL(0), val); 7391 } 7392 7393 void 7394 svwsata_mapchan(struct pciide_channel *cp) 7395 { 7396 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7397 struct channel_softc *wdc_cp = &cp->wdc_channel; 7398 struct pciide_svwsata *ss = sc->sc_cookie; 7399 7400 cp->compat = 0; 7401 cp->ih = sc->sc_pci_ih; 7402 7403 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7404 (wdc_cp->channel << 8) + SVWSATA_TF0, 7405 SVWSATA_TF8 - SVWSATA_TF0, &wdc_cp->cmd_ioh) != 0) { 7406 printf("%s: couldn't map %s cmd regs\n", 7407 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7408 return; 7409 } 7410 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7411 (wdc_cp->channel << 8) + SVWSATA_TF8, 4, 7412 &wdc_cp->ctl_ioh) != 0) { 7413 printf("%s: couldn't map %s ctl regs\n", 7414 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7415 return; 7416 } 7417 wdc_cp->cmd_iot = wdc_cp->ctl_iot = ss->ba5_st; 7418 wdc_cp->_vtbl = &wdc_svwsata_vtbl; 7419 wdcattach(wdc_cp); 7420 } 7421 7422 void 7423 svwsata_drv_probe(struct channel_softc *chp) 7424 { 7425 struct pciide_channel *cp = (struct pciide_channel *)chp; 7426 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7427 struct pciide_svwsata *ss = sc->sc_cookie; 7428 int channel = chp->channel; 
7429 uint32_t scontrol, sstatus; 7430 uint8_t scnt, sn, cl, ch; 7431 int i, s; 7432 7433 /* XXX This should be done by other code. */ 7434 for (i = 0; i < 2; i++) { 7435 chp->ch_drive[i].chnl_softc = chp; 7436 chp->ch_drive[i].drive = i; 7437 } 7438 7439 /* 7440 * Request communication initialization sequence, any speed. 7441 * Performing this is the equivalent of an ATA Reset. 7442 */ 7443 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 7444 7445 /* 7446 * XXX We don't yet support SATA power management; disable all 7447 * power management state transitions. 7448 */ 7449 scontrol |= SControl_IPM_NONE; 7450 7451 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7452 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7453 delay(50 * 1000); 7454 scontrol &= ~SControl_DET_INIT; 7455 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7456 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7457 delay(50 * 1000); 7458 7459 sstatus = bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7460 (channel << 8) + SVWSATA_SSTATUS); 7461 #if 0 7462 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 7463 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 7464 bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7465 (channel << 8) + SVWSATA_SSTATUS)); 7466 #endif 7467 switch (sstatus & SStatus_DET_mask) { 7468 case SStatus_DET_NODEV: 7469 /* No device; be silent. */ 7470 break; 7471 7472 case SStatus_DET_DEV_NE: 7473 printf("%s: port %d: device connected, but " 7474 "communication not established\n", 7475 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7476 break; 7477 7478 case SStatus_DET_OFFLINE: 7479 printf("%s: port %d: PHY offline\n", 7480 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7481 break; 7482 7483 case SStatus_DET_DEV: 7484 /* 7485 * XXX ATAPI detection doesn't currently work. Don't 7486 * XXX know why. But, it's not like the standard method 7487 * XXX can detect an ATAPI device connected via a SATA/PATA 7488 * XXX bridge, so at least this is no worse. --thorpej 7489 */ 7490 if (chp->_vtbl != NULL) 7491 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 7492 else 7493 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 7494 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 7495 delay(10); /* 400ns delay */ 7496 /* Save register contents. */ 7497 if (chp->_vtbl != NULL) { 7498 scnt = CHP_READ_REG(chp, wdr_seccnt); 7499 sn = CHP_READ_REG(chp, wdr_sector); 7500 cl = CHP_READ_REG(chp, wdr_cyl_lo); 7501 ch = CHP_READ_REG(chp, wdr_cyl_hi); 7502 } else { 7503 scnt = bus_space_read_1(chp->cmd_iot, 7504 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 7505 sn = bus_space_read_1(chp->cmd_iot, 7506 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 7507 cl = bus_space_read_1(chp->cmd_iot, 7508 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 7509 ch = bus_space_read_1(chp->cmd_iot, 7510 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 7511 } 7512 #if 0 7513 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7514 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7515 scnt, sn, cl, ch); 7516 #endif 7517 /* 7518 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7519 * cases we get wrong values here, so ignore it. 
7520 */ 7521 s = splbio(); 7522 if (cl == 0x14 && ch == 0xeb) 7523 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7524 else 7525 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7526 splx(s); 7527 7528 printf("%s: port %d: device present", 7529 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7530 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7531 case 1: 7532 printf(", speed: 1.5Gb/s"); 7533 break; 7534 case 2: 7535 printf(", speed: 3.0Gb/s"); 7536 break; 7537 } 7538 printf("\n"); 7539 break; 7540 7541 default: 7542 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7543 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7544 } 7545 } 7546 7547 u_int8_t 7548 svwsata_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7549 { 7550 if (reg & _WDC_AUX) { 7551 return (bus_space_read_4(chp->ctl_iot, chp->ctl_ioh, 7552 (reg & _WDC_REGMASK) << 2)); 7553 } else { 7554 return (bus_space_read_4(chp->cmd_iot, chp->cmd_ioh, 7555 (reg & _WDC_REGMASK) << 2)); 7556 } 7557 } 7558 7559 void 7560 svwsata_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7561 { 7562 if (reg & _WDC_AUX) { 7563 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7564 (reg & _WDC_REGMASK) << 2, val); 7565 } else { 7566 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7567 (reg & _WDC_REGMASK) << 2, val); 7568 } 7569 } 7570 7571 void 7572 svwsata_lba48_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int16_t val) 7573 { 7574 if (reg & _WDC_AUX) { 7575 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7576 (reg & _WDC_REGMASK) << 2, val); 7577 } else { 7578 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7579 (reg & _WDC_REGMASK) << 2, val); 7580 } 7581 } 7582 7583 #define ACARD_IS_850(sc) \ 7584 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 7585 7586 void 7587 acard_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7588 { 7589 struct pciide_channel *cp; 7590 int i; 7591 pcireg_t interface; 7592 bus_size_t cmdsize, ctlsize; 7593 7594 /* 7595 * when the chip is in native mode it identifies itself as a 7596 * 'misc mass storage'. Fake interface in this case. 
7597 */ 7598 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 7599 interface = PCI_INTERFACE(pa->pa_class); 7600 } else { 7601 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 7602 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 7603 } 7604 7605 printf(": DMA"); 7606 pciide_mapreg_dma(sc, pa); 7607 printf("\n"); 7608 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7609 WDC_CAPABILITY_MODE; 7610 7611 if (sc->sc_dma_ok) { 7612 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7613 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7614 sc->sc_wdcdev.irqack = pciide_irqack; 7615 } 7616 sc->sc_wdcdev.PIO_cap = 4; 7617 sc->sc_wdcdev.DMA_cap = 2; 7618 switch (sc->sc_pp->ide_product) { 7619 case PCI_PRODUCT_ACARD_ATP850U: 7620 sc->sc_wdcdev.UDMA_cap = 2; 7621 break; 7622 case PCI_PRODUCT_ACARD_ATP860: 7623 case PCI_PRODUCT_ACARD_ATP860A: 7624 sc->sc_wdcdev.UDMA_cap = 4; 7625 break; 7626 case PCI_PRODUCT_ACARD_ATP865A: 7627 case PCI_PRODUCT_ACARD_ATP865R: 7628 sc->sc_wdcdev.UDMA_cap = 6; 7629 break; 7630 } 7631 7632 sc->sc_wdcdev.set_modes = acard_setup_channel; 7633 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7634 sc->sc_wdcdev.nchannels = 2; 7635 7636 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7637 cp = &sc->pciide_channels[i]; 7638 if (pciide_chansetup(sc, i, interface) == 0) 7639 continue; 7640 if (interface & PCIIDE_INTERFACE_PCI(i)) { 7641 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 7642 &ctlsize, pciide_pci_intr); 7643 } else { 7644 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 7645 &cmdsize, &ctlsize); 7646 } 7647 if (cp->hw_ok == 0) 7648 return; 7649 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 7650 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 7651 wdcattach(&cp->wdc_channel); 7652 acard_setup_channel(&cp->wdc_channel); 7653 } 7654 if (!ACARD_IS_850(sc)) { 7655 u_int32_t reg; 7656 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 7657 reg &= ~ATP860_CTRL_INT; 7658 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 7659 } 7660 } 7661 7662 void 7663 acard_setup_channel(struct channel_softc *chp) 7664 { 7665 struct ata_drive_datas *drvp; 7666 struct pciide_channel *cp = (struct pciide_channel *)chp; 7667 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7668 int channel = chp->channel; 7669 int drive; 7670 u_int32_t idetime, udma_mode; 7671 u_int32_t idedma_ctl; 7672 7673 /* setup DMA if needed */ 7674 pciide_channel_dma_setup(cp); 7675 7676 if (ACARD_IS_850(sc)) { 7677 idetime = 0; 7678 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 7679 udma_mode &= ~ATP850_UDMA_MASK(channel); 7680 } else { 7681 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 7682 idetime &= ~ATP860_SETTIME_MASK(channel); 7683 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 7684 udma_mode &= ~ATP860_UDMA_MASK(channel); 7685 } 7686 7687 idedma_ctl = 0; 7688 7689 /* Per drive settings */ 7690 for (drive = 0; drive < 2; drive++) { 7691 drvp = &chp->ch_drive[drive]; 7692 /* If no drive, skip */ 7693 if ((drvp->drive_flags & DRIVE) == 0) 7694 continue; 7695 /* add timing values, setup DMA if needed */ 7696 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7697 (drvp->drive_flags & DRIVE_UDMA)) { 7698 /* use Ultra/DMA */ 7699 if (ACARD_IS_850(sc)) { 7700 idetime |= ATP850_SETTIME(drive, 7701 acard_act_udma[drvp->UDMA_mode], 7702 acard_rec_udma[drvp->UDMA_mode]); 7703 udma_mode |= ATP850_UDMA_MODE(channel, drive, 7704 acard_udma_conf[drvp->UDMA_mode]); 7705 } else { 7706 idetime |= 
ATP860_SETTIME(channel, drive, 7707 acard_act_udma[drvp->UDMA_mode], 7708 acard_rec_udma[drvp->UDMA_mode]); 7709 udma_mode |= ATP860_UDMA_MODE(channel, drive, 7710 acard_udma_conf[drvp->UDMA_mode]); 7711 } 7712 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7713 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7714 (drvp->drive_flags & DRIVE_DMA)) { 7715 /* use Multiword DMA */ 7716 drvp->drive_flags &= ~DRIVE_UDMA; 7717 if (ACARD_IS_850(sc)) { 7718 idetime |= ATP850_SETTIME(drive, 7719 acard_act_dma[drvp->DMA_mode], 7720 acard_rec_dma[drvp->DMA_mode]); 7721 } else { 7722 idetime |= ATP860_SETTIME(channel, drive, 7723 acard_act_dma[drvp->DMA_mode], 7724 acard_rec_dma[drvp->DMA_mode]); 7725 } 7726 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7727 } else { 7728 /* PIO only */ 7729 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7730 if (ACARD_IS_850(sc)) { 7731 idetime |= ATP850_SETTIME(drive, 7732 acard_act_pio[drvp->PIO_mode], 7733 acard_rec_pio[drvp->PIO_mode]); 7734 } else { 7735 idetime |= ATP860_SETTIME(channel, drive, 7736 acard_act_pio[drvp->PIO_mode], 7737 acard_rec_pio[drvp->PIO_mode]); 7738 } 7739 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 7740 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 7741 | ATP8x0_CTRL_EN(channel)); 7742 } 7743 } 7744 7745 if (idedma_ctl != 0) { 7746 /* Add software bits in status register */ 7747 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7748 IDEDMA_CTL(channel), idedma_ctl); 7749 } 7750 pciide_print_modes(cp); 7751 7752 if (ACARD_IS_850(sc)) { 7753 pci_conf_write(sc->sc_pc, sc->sc_tag, 7754 ATP850_IDETIME(channel), idetime); 7755 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 7756 } else { 7757 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 7758 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 7759 } 7760 } 7761 7762 void 7763 nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7764 { 7765 struct pciide_channel *cp; 7766 int channel; 7767 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 7768 bus_size_t cmdsize, ctlsize; 7769 u_int32_t conf; 7770 7771 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 7772 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 7773 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 7774 7775 printf(": DMA"); 7776 pciide_mapreg_dma(sc, pa); 7777 7778 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7779 WDC_CAPABILITY_MODE; 7780 if (sc->sc_dma_ok) { 7781 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7782 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7783 sc->sc_wdcdev.irqack = pciide_irqack; 7784 } 7785 sc->sc_wdcdev.PIO_cap = 4; 7786 sc->sc_wdcdev.DMA_cap = 2; 7787 switch (sc->sc_pp->ide_product) { 7788 case PCI_PRODUCT_NVIDIA_NFORCE_IDE: 7789 sc->sc_wdcdev.UDMA_cap = 5; 7790 break; 7791 default: 7792 sc->sc_wdcdev.UDMA_cap = 6; 7793 } 7794 sc->sc_wdcdev.set_modes = nforce_setup_channel; 7795 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7796 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 7797 7798 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 7799 7800 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7801 cp = &sc->pciide_channels[channel]; 7802 7803 if (pciide_chansetup(sc, channel, interface) == 0) 7804 continue; 7805 7806 if ((conf & NFORCE_CHAN_EN(channel)) == 0) { 7807 printf("%s: %s ignored (disabled)\n", 7808 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7809 continue; 7810 } 7811 7812 pciide_map_compat_intr(pa, cp, channel, interface); 7813 if (cp->hw_ok == 0) 7814 continue; 7815 
pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7816 nforce_pci_intr); 7817 if (cp->hw_ok == 0) { 7818 pciide_unmap_compat_intr(pa, cp, channel, interface); 7819 continue; 7820 } 7821 7822 if (pciide_chan_candisable(cp)) { 7823 conf &= ~NFORCE_CHAN_EN(channel); 7824 pciide_unmap_compat_intr(pa, cp, channel, interface); 7825 continue; 7826 } 7827 7828 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 7829 } 7830 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 7831 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 7832 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 7833 } 7834 7835 void 7836 nforce_setup_channel(struct channel_softc *chp) 7837 { 7838 struct ata_drive_datas *drvp; 7839 int drive, mode; 7840 u_int32_t idedma_ctl; 7841 struct pciide_channel *cp = (struct pciide_channel *)chp; 7842 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7843 int channel = chp->channel; 7844 u_int32_t conf, piodmatim, piotim, udmatim; 7845 7846 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 7847 piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM); 7848 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM); 7849 udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM); 7850 WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, " 7851 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 7852 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 7853 7854 /* Setup DMA if needed */ 7855 pciide_channel_dma_setup(cp); 7856 7857 /* Clear all bits for this channel */ 7858 idedma_ctl = 0; 7859 piodmatim &= ~NFORCE_PIODMATIM_MASK(channel); 7860 udmatim &= ~NFORCE_UDMATIM_MASK(channel); 7861 7862 /* Per channel settings */ 7863 for (drive = 0; drive < 2; drive++) { 7864 drvp = &chp->ch_drive[drive]; 7865 7866 /* If no drive, skip */ 7867 if ((drvp->drive_flags & DRIVE) == 0) 7868 continue; 7869 7870 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 7871 (drvp->drive_flags & DRIVE_UDMA) != 0) { 7872 /* Setup UltraDMA mode */ 7873 drvp->drive_flags &= ~DRIVE_DMA; 7874 7875 udmatim |= NFORCE_UDMATIM_SET(channel, drive, 7876 nforce_udma[drvp->UDMA_mode]) | 7877 NFORCE_UDMA_EN(channel, drive) | 7878 NFORCE_UDMA_ENM(channel, drive); 7879 7880 mode = drvp->PIO_mode; 7881 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 7882 (drvp->drive_flags & DRIVE_DMA) != 0) { 7883 /* Setup multiword DMA mode */ 7884 drvp->drive_flags &= ~DRIVE_UDMA; 7885 7886 /* mode = min(pio, dma + 2) */ 7887 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 7888 mode = drvp->PIO_mode; 7889 else 7890 mode = drvp->DMA_mode + 2; 7891 } else { 7892 mode = drvp->PIO_mode; 7893 goto pio; 7894 } 7895 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7896 7897 pio: 7898 /* Setup PIO mode */ 7899 if (mode <= 2) { 7900 drvp->DMA_mode = 0; 7901 drvp->PIO_mode = 0; 7902 mode = 0; 7903 } else { 7904 drvp->PIO_mode = mode; 7905 drvp->DMA_mode = mode - 2; 7906 } 7907 piodmatim |= NFORCE_PIODMATIM_SET(channel, drive, 7908 nforce_pio[mode]); 7909 } 7910 7911 if (idedma_ctl != 0) { 7912 /* Add software bits in status register */ 7913 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7914 IDEDMA_CTL(channel), idedma_ctl); 7915 } 7916 7917 WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, " 7918 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 7919 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 7920 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim); 7921 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim); 7922 7923 
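	/*
	 * NFORCE_PIOTIM is only read for the debug output above and is left
	 * unmodified here, so there is nothing to write back for it.  A
	 * read-back check of the registers that were written could look like
	 * the sketch below; it is illustrative only and not compiled.
	 */
#if 0
	if (pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM) !=
	    piodmatim ||
	    pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM) != udmatim)
		printf("%s: %s timing register readback mismatch\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
#endif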
pciide_print_modes(cp); 7924 } 7925 7926 int 7927 nforce_pci_intr(void *arg) 7928 { 7929 struct pciide_softc *sc = arg; 7930 struct pciide_channel *cp; 7931 struct channel_softc *wdc_cp; 7932 int i, rv, crv; 7933 u_int32_t dmastat; 7934 7935 rv = 0; 7936 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7937 cp = &sc->pciide_channels[i]; 7938 wdc_cp = &cp->wdc_channel; 7939 7940 /* Skip compat channel */ 7941 if (cp->compat) 7942 continue; 7943 7944 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7945 IDEDMA_CTL(i)); 7946 if ((dmastat & IDEDMA_CTL_INTR) == 0) 7947 continue; 7948 7949 crv = wdcintr(wdc_cp); 7950 if (crv == 0) 7951 printf("%s:%d: bogus intr\n", 7952 sc->sc_wdcdev.sc_dev.dv_xname, i); 7953 else 7954 rv = 1; 7955 } 7956 return (rv); 7957 } 7958 7959 void 7960 artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7961 { 7962 struct pciide_channel *cp; 7963 bus_size_t cmdsize, ctlsize; 7964 pcireg_t interface; 7965 int channel; 7966 7967 printf(": DMA"); 7968 #ifdef PCIIDE_I31244_DISABLEDMA 7969 if (sc->sc_rev == 0) { 7970 printf(" disabled due to rev. 0"); 7971 sc->sc_dma_ok = 0; 7972 } else 7973 #endif 7974 pciide_mapreg_dma(sc, pa); 7975 printf("\n"); 7976 7977 /* 7978 * XXX Configure LEDs to show activity. 7979 */ 7980 7981 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7982 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7983 sc->sc_wdcdev.PIO_cap = 4; 7984 if (sc->sc_dma_ok) { 7985 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7986 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7987 sc->sc_wdcdev.irqack = pciide_irqack; 7988 sc->sc_wdcdev.DMA_cap = 2; 7989 sc->sc_wdcdev.UDMA_cap = 6; 7990 } 7991 sc->sc_wdcdev.set_modes = sata_setup_channel; 7992 7993 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7994 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 7995 7996 interface = PCI_INTERFACE(pa->pa_class); 7997 7998 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7999 cp = &sc->pciide_channels[channel]; 8000 if (pciide_chansetup(sc, channel, interface) == 0) 8001 continue; 8002 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8003 pciide_pci_intr); 8004 if (cp->hw_ok == 0) 8005 continue; 8006 pciide_map_compat_intr(pa, cp, channel, interface); 8007 sata_setup_channel(&cp->wdc_channel); 8008 } 8009 } 8010 8011 void 8012 ite_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8013 { 8014 struct pciide_channel *cp; 8015 int channel; 8016 pcireg_t interface; 8017 bus_size_t cmdsize, ctlsize; 8018 pcireg_t cfg, modectl; 8019 8020 /* 8021 * Fake interface since IT8212F is claimed to be a ``RAID'' device. 
8022 */ 8023 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8024 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8025 8026 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8027 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8028 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8029 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8030 modectl & IT_MODE_MASK), DEBUG_PROBE); 8031 8032 printf(": DMA"); 8033 pciide_mapreg_dma(sc, pa); 8034 8035 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8036 WDC_CAPABILITY_MODE; 8037 if (sc->sc_dma_ok) { 8038 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8039 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8040 sc->sc_wdcdev.irqack = pciide_irqack; 8041 } 8042 sc->sc_wdcdev.PIO_cap = 4; 8043 sc->sc_wdcdev.DMA_cap = 2; 8044 sc->sc_wdcdev.UDMA_cap = 6; 8045 8046 sc->sc_wdcdev.set_modes = ite_setup_channel; 8047 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8048 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8049 8050 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8051 8052 /* Disable RAID */ 8053 modectl &= ~IT_MODE_RAID1; 8054 /* Disable CPU firmware mode */ 8055 modectl &= ~IT_MODE_CPU; 8056 8057 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8058 8059 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8060 cp = &sc->pciide_channels[channel]; 8061 8062 if (pciide_chansetup(sc, channel, interface) == 0) 8063 continue; 8064 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8065 pciide_pci_intr); 8066 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8067 } 8068 8069 /* Re-read configuration registers after channels setup */ 8070 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8071 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8072 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8073 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8074 modectl & IT_MODE_MASK), DEBUG_PROBE); 8075 } 8076 8077 void 8078 ite_setup_channel(struct channel_softc *chp) 8079 { 8080 struct ata_drive_datas *drvp; 8081 int drive, mode; 8082 u_int32_t idedma_ctl; 8083 struct pciide_channel *cp = (struct pciide_channel *)chp; 8084 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8085 int channel = chp->channel; 8086 pcireg_t cfg, modectl; 8087 pcireg_t tim; 8088 8089 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8090 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8091 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_TIM(channel)); 8092 WDCDEBUG_PRINT(("%s:%d: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8093 channel, tim), DEBUG_PROBE); 8094 8095 /* Setup DMA if needed */ 8096 pciide_channel_dma_setup(cp); 8097 8098 /* Clear all bits for this channel */ 8099 idedma_ctl = 0; 8100 8101 /* Per channel settings */ 8102 for (drive = 0; drive < 2; drive++) { 8103 drvp = &chp->ch_drive[drive]; 8104 8105 /* If no drive, skip */ 8106 if ((drvp->drive_flags & DRIVE) == 0) 8107 continue; 8108 8109 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8110 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8111 /* Setup UltraDMA mode */ 8112 drvp->drive_flags &= ~DRIVE_DMA; 8113 modectl &= ~IT_MODE_DMA(channel, drive); 8114 8115 #if 0 8116 /* Check cable, works only in CPU firmware mode */ 8117 if (drvp->UDMA_mode > 2 && 8118 (cfg & IT_CFG_CABLE(channel, drive)) == 0) { 8119 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 8120 "80-wire cable not detected\n", 8121 drvp->drive_name, 8122 sc->sc_wdcdev.sc_dev.dv_xname, 8123 channel, drive), DEBUG_PROBE); 8124 drvp->UDMA_mode = 2; 8125 } 8126 #endif 8127 
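			/*
			 * With the firmware-mode cable check above disabled,
			 * no 40-pin clamp is applied here.  A conservative
			 * fallback (illustrative sketch only, not compiled)
			 * would mirror the serverworks/jmicron code and limit
			 * an unknown cable to UDMA2:
			 */
#if 0
			if (drvp->UDMA_mode > 2)
				drvp->UDMA_mode = 2;
#endif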
8128 if (drvp->UDMA_mode >= 5) 8129 tim |= IT_TIM_UDMA5(drive); 8130 else 8131 tim &= ~IT_TIM_UDMA5(drive); 8132 8133 mode = drvp->PIO_mode; 8134 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8135 (drvp->drive_flags & DRIVE_DMA) != 0) { 8136 /* Setup multiword DMA mode */ 8137 drvp->drive_flags &= ~DRIVE_UDMA; 8138 modectl |= IT_MODE_DMA(channel, drive); 8139 8140 /* mode = min(pio, dma + 2) */ 8141 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8142 mode = drvp->PIO_mode; 8143 else 8144 mode = drvp->DMA_mode + 2; 8145 } else { 8146 mode = drvp->PIO_mode; goto pio; 8147 } 8148 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8149 8150 pio: 8151 /* Setup PIO mode */ 8152 if (mode <= 2) { 8153 drvp->DMA_mode = 0; 8154 drvp->PIO_mode = 0; 8155 mode = 0; 8156 } else { 8157 drvp->PIO_mode = mode; 8158 drvp->DMA_mode = mode - 2; 8159 } 8160 8161 /* Enable IORDY if PIO mode >= 3 */ 8162 if (drvp->PIO_mode >= 3) 8163 cfg |= IT_CFG_IORDY(channel); 8164 } 8165 8166 WDCDEBUG_PRINT(("%s: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8167 tim), DEBUG_PROBE); 8168 8169 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_CFG, cfg); 8170 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8171 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_TIM(channel), tim); 8172 8173 if (idedma_ctl != 0) { 8174 /* Add software bits in status register */ 8175 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8176 IDEDMA_CTL(channel), idedma_ctl); 8177 } 8178 8179 pciide_print_modes(cp); 8180 } 8181 8182 void 8183 ixp_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8184 { 8185 struct pciide_channel *cp; 8186 int channel; 8187 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8188 bus_size_t cmdsize, ctlsize; 8189 8190 printf(": DMA"); 8191 pciide_mapreg_dma(sc, pa); 8192 8193 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8194 WDC_CAPABILITY_MODE; 8195 if (sc->sc_dma_ok) { 8196 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8197 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8198 sc->sc_wdcdev.irqack = pciide_irqack; 8199 } 8200 sc->sc_wdcdev.PIO_cap = 4; 8201 sc->sc_wdcdev.DMA_cap = 2; 8202 sc->sc_wdcdev.UDMA_cap = 6; 8203 8204 sc->sc_wdcdev.set_modes = ixp_setup_channel; 8205 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8206 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8207 8208 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8209 8210 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8211 cp = &sc->pciide_channels[channel]; 8212 if (pciide_chansetup(sc, channel, interface) == 0) 8213 continue; 8214 pciide_map_compat_intr(pa, cp, channel, interface); 8215 if (cp->hw_ok == 0) 8216 continue; 8217 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8218 pciide_pci_intr); 8219 if (cp->hw_ok == 0) { 8220 pciide_unmap_compat_intr(pa, cp, channel, interface); 8221 continue; 8222 } 8223 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8224 } 8225 } 8226 8227 void 8228 ixp_setup_channel(struct channel_softc *chp) 8229 { 8230 struct ata_drive_datas *drvp; 8231 int drive, mode; 8232 u_int32_t idedma_ctl; 8233 struct pciide_channel *cp = (struct pciide_channel*)chp; 8234 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8235 int channel = chp->channel; 8236 pcireg_t udma, mdma_timing, pio, pio_timing; 8237 8238 pio_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING); 8239 pio = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL); 8240 mdma_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING); 8241 udma = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL); 8242
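#if 0
	/*
	 * Illustrative only (not compiled): dump the IXP timing registers
	 * before they are modified, in the same DEBUG_PROBE style that
	 * nforce_setup_channel uses for its old timing values.
	 */
	WDCDEBUG_PRINT(("%s: %s old timing values: pio_timing=0x%x, "
	    "pio=0x%x, mdma_timing=0x%x, udma=0x%x\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name, pio_timing, pio,
	    mdma_timing, udma), DEBUG_PROBE);
#endif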
8243 /* Setup DMA if needed */ 8244 pciide_channel_dma_setup(cp); 8245 8246 idedma_ctl = 0; 8247 8248 /* Per channel settings */ 8249 for (drive = 0; drive < 2; drive++) { 8250 drvp = &chp->ch_drive[drive]; 8251 8252 /* If no drive, skip */ 8253 if ((drvp->drive_flags & DRIVE) == 0) 8254 continue; 8255 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8256 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8257 /* Setup UltraDMA mode */ 8258 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8259 IXP_UDMA_ENABLE(udma, chp->channel, drive); 8260 IXP_SET_MODE(udma, chp->channel, drive, 8261 drvp->UDMA_mode); 8262 mode = drvp->PIO_mode; 8263 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8264 (drvp->drive_flags & DRIVE_DMA) != 0) { 8265 /* Setup multiword DMA mode */ 8266 drvp->drive_flags &= ~DRIVE_UDMA; 8267 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8268 IXP_UDMA_DISABLE(udma, chp->channel, drive); 8269 IXP_SET_TIMING(mdma_timing, chp->channel, drive, 8270 ixp_mdma_timings[drvp->DMA_mode]); 8271 8272 /* mode = min(pio, dma + 2) */ 8273 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8274 mode = drvp->PIO_mode; 8275 else 8276 mode = drvp->DMA_mode + 2; 8277 } else { 8278 mode = drvp->PIO_mode; 8279 } 8280 8281 /* Setup PIO mode */ 8282 drvp->PIO_mode = mode; 8283 if (mode < 2) 8284 drvp->DMA_mode = 0; 8285 else 8286 drvp->DMA_mode = mode - 2; 8287 /* 8288 * Set PIO mode and timings 8289 * Linux driver avoids PIO mode 1, let's do it too. 8290 */ 8291 if (drvp->PIO_mode == 1) 8292 drvp->PIO_mode = 0; 8293 8294 IXP_SET_MODE(pio, chp->channel, drive, drvp->PIO_mode); 8295 IXP_SET_TIMING(pio_timing, chp->channel, drive, 8296 ixp_pio_timings[drvp->PIO_mode]); 8297 } 8298 8299 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL, udma); 8300 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING, mdma_timing); 8301 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL, pio); 8302 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING, pio_timing); 8303 8304 if (idedma_ctl != 0) { 8305 /* Add software bits in status register */ 8306 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8307 IDEDMA_CTL(channel), idedma_ctl); 8308 } 8309 8310 pciide_print_modes(cp); 8311 } 8312 8313 void 8314 jmicron_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8315 { 8316 struct pciide_channel *cp; 8317 int channel; 8318 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8319 bus_size_t cmdsize, ctlsize; 8320 u_int32_t conf; 8321 8322 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8323 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8324 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8325 8326 printf(": DMA"); 8327 pciide_mapreg_dma(sc, pa); 8328 8329 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8330 WDC_CAPABILITY_MODE; 8331 if (sc->sc_dma_ok) { 8332 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8333 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8334 sc->sc_wdcdev.irqack = pciide_irqack; 8335 } 8336 sc->sc_wdcdev.PIO_cap = 4; 8337 sc->sc_wdcdev.DMA_cap = 2; 8338 sc->sc_wdcdev.UDMA_cap = 6; 8339 sc->sc_wdcdev.set_modes = jmicron_setup_channel; 8340 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8341 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8342 8343 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8344 8345 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8346 cp = &sc->pciide_channels[channel]; 8347 8348 if (pciide_chansetup(sc, channel, interface) == 0) 8349 continue; 8350 8351 #if 0 8352 if ((conf & JMICRON_CHAN_EN(channel)) == 0) { 8353 
printf("%s: %s ignored (disabled)\n", 8354 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8355 continue; 8356 } 8357 #endif 8358 8359 pciide_map_compat_intr(pa, cp, channel, interface); 8360 if (cp->hw_ok == 0) 8361 continue; 8362 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8363 pciide_pci_intr); 8364 if (cp->hw_ok == 0) { 8365 pciide_unmap_compat_intr(pa, cp, channel, interface); 8366 continue; 8367 } 8368 8369 if (pciide_chan_candisable(cp)) { 8370 conf &= ~JMICRON_CHAN_EN(channel); 8371 pciide_unmap_compat_intr(pa, cp, channel, interface); 8372 continue; 8373 } 8374 8375 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8376 } 8377 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8378 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8379 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 8380 } 8381 8382 void 8383 jmicron_setup_channel(struct channel_softc *chp) 8384 { 8385 struct ata_drive_datas *drvp; 8386 int drive, mode; 8387 u_int32_t idedma_ctl; 8388 struct pciide_channel *cp = (struct pciide_channel *)chp; 8389 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8390 int channel = chp->channel; 8391 u_int32_t conf; 8392 8393 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8394 8395 /* Setup DMA if needed */ 8396 pciide_channel_dma_setup(cp); 8397 8398 /* Clear all bits for this channel */ 8399 idedma_ctl = 0; 8400 8401 /* Per channel settings */ 8402 for (drive = 0; drive < 2; drive++) { 8403 drvp = &chp->ch_drive[drive]; 8404 8405 /* If no drive, skip */ 8406 if ((drvp->drive_flags & DRIVE) == 0) 8407 continue; 8408 8409 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8410 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8411 /* Setup UltraDMA mode */ 8412 drvp->drive_flags &= ~DRIVE_DMA; 8413 8414 /* see if cable is up to scratch */ 8415 if ((conf & JMICRON_CONF_40PIN) && 8416 (drvp->UDMA_mode > 2)) 8417 drvp->UDMA_mode = 2; 8418 8419 mode = drvp->PIO_mode; 8420 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8421 (drvp->drive_flags & DRIVE_DMA) != 0) { 8422 /* Setup multiword DMA mode */ 8423 drvp->drive_flags &= ~DRIVE_UDMA; 8424 8425 /* mode = min(pio, dma + 2) */ 8426 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8427 mode = drvp->PIO_mode; 8428 else 8429 mode = drvp->DMA_mode + 2; 8430 } else { 8431 mode = drvp->PIO_mode; 8432 goto pio; 8433 } 8434 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8435 8436 pio: 8437 /* Setup PIO mode */ 8438 if (mode <= 2) { 8439 drvp->DMA_mode = 0; 8440 drvp->PIO_mode = 0; 8441 } else { 8442 drvp->PIO_mode = mode; 8443 drvp->DMA_mode = mode - 2; 8444 } 8445 } 8446 8447 if (idedma_ctl != 0) { 8448 /* Add software bits in status register */ 8449 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8450 IDEDMA_CTL(channel), idedma_ctl); 8451 } 8452 8453 pciide_print_modes(cp); 8454 } 8455