1 /* $OpenBSD: pciide.c,v 1.294 2009/04/24 07:59:50 jsg Exp $ */ 2 /* $NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $ */ 3 4 /* 5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Manuel Bouyer. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 */ 34 35 /* 36 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. All advertising materials mentioning features or use of this software 47 * must display the following acknowledgement: 48 * This product includes software developed by Christopher G. Demetriou 49 * for the NetBSD Project. 50 * 4. The name of the author may not be used to endorse or promote products 51 * derived from this software without specific prior written permission 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
56 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 57 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 58 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 62 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 63 */ 64 65 /* 66 * PCI IDE controller driver. 67 * 68 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 69 * sys/dev/pci/ppb.c, revision 1.16). 70 * 71 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 72 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 73 * 5/16/94" from the PCI SIG. 74 * 75 */ 76 77 #define DEBUG_DMA 0x01 78 #define DEBUG_XFERS 0x02 79 #define DEBUG_FUNCS 0x08 80 #define DEBUG_PROBE 0x10 81 82 #ifdef WDCDEBUG 83 #ifndef WDCDEBUG_PCIIDE_MASK 84 #define WDCDEBUG_PCIIDE_MASK 0x00 85 #endif 86 int wdcdebug_pciide_mask = WDCDEBUG_PCIIDE_MASK; 87 #define WDCDEBUG_PRINT(args, level) do { \ 88 if ((wdcdebug_pciide_mask & (level)) != 0) \ 89 printf args; \ 90 } while (0) 91 #else 92 #define WDCDEBUG_PRINT(args, level) 93 #endif 94 #include <sys/param.h> 95 #include <sys/systm.h> 96 #include <sys/device.h> 97 #include <sys/malloc.h> 98 99 #include <machine/bus.h> 100 #include <machine/endian.h> 101 102 #include <dev/ata/atavar.h> 103 #include <dev/ata/satareg.h> 104 #include <dev/ic/wdcreg.h> 105 #include <dev/ic/wdcvar.h> 106 107 #include <dev/pci/pcireg.h> 108 #include <dev/pci/pcivar.h> 109 #include <dev/pci/pcidevs.h> 110 111 #include <dev/pci/pciidereg.h> 112 #include <dev/pci/pciidevar.h> 113 #include <dev/pci/pciide_piix_reg.h> 114 #include <dev/pci/pciide_amd_reg.h> 115 #include <dev/pci/pciide_apollo_reg.h> 116 #include <dev/pci/pciide_cmd_reg.h> 117 #include <dev/pci/pciide_sii3112_reg.h> 118 #include <dev/pci/pciide_cy693_reg.h> 119 #include <dev/pci/pciide_sis_reg.h> 120 #include <dev/pci/pciide_acer_reg.h> 121 #include <dev/pci/pciide_pdc202xx_reg.h> 122 #include <dev/pci/pciide_opti_reg.h> 123 #include <dev/pci/pciide_hpt_reg.h> 124 #include <dev/pci/pciide_acard_reg.h> 125 #include <dev/pci/pciide_natsemi_reg.h> 126 #include <dev/pci/pciide_nforce_reg.h> 127 #include <dev/pci/pciide_i31244_reg.h> 128 #include <dev/pci/pciide_ite_reg.h> 129 #include <dev/pci/pciide_ixp_reg.h> 130 #include <dev/pci/pciide_svwsata_reg.h> 131 #include <dev/pci/pciide_jmicron_reg.h> 132 #include <dev/pci/cy82c693var.h> 133 134 /* functions for reading/writing 8-bit PCI registers */ 135 136 u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t, 137 int); 138 void pciide_pci_write(pci_chipset_tag_t, pcitag_t, 139 int, u_int8_t); 140 141 u_int8_t 142 pciide_pci_read(pci_chipset_tag_t pc, pcitag_t pa, int reg) 143 { 144 return (pci_conf_read(pc, pa, (reg & ~0x03)) >> 145 ((reg & 0x03) * 8) & 0xff); 146 } 147 148 void 149 pciide_pci_write(pci_chipset_tag_t pc, pcitag_t pa, int reg, u_int8_t val) 150 { 151 pcireg_t pcival; 152 153 pcival = pci_conf_read(pc, pa, (reg & ~0x03)); 154 pcival &= ~(0xff << ((reg & 0x03) * 8)); 155 pcival |= (val << ((reg & 0x03) * 8)); 156 pci_conf_write(pc, pa, (reg & ~0x03), pcival); 157 } 158 159 void default_chip_map(struct pciide_softc *, struct pci_attach_args *); 160 161 void sata_chip_map(struct pciide_softc *, struct pci_attach_args *); 162 void sata_setup_channel(struct channel_softc 
*); 163 164 void piix_chip_map(struct pciide_softc *, struct pci_attach_args *); 165 void piixsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 166 void piix_setup_channel(struct channel_softc *); 167 void piix3_4_setup_channel(struct channel_softc *); 168 void piix_timing_debug(struct pciide_softc *); 169 170 u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t); 171 u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas *); 172 u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t); 173 174 void amd756_chip_map(struct pciide_softc *, struct pci_attach_args *); 175 void amd756_setup_channel(struct channel_softc *); 176 177 void apollo_chip_map(struct pciide_softc *, struct pci_attach_args *); 178 void apollo_setup_channel(struct channel_softc *); 179 180 void cmd_chip_map(struct pciide_softc *, struct pci_attach_args *); 181 void cmd0643_9_chip_map(struct pciide_softc *, struct pci_attach_args *); 182 void cmd0643_9_setup_channel(struct channel_softc *); 183 void cmd680_chip_map(struct pciide_softc *, struct pci_attach_args *); 184 void cmd680_setup_channel(struct channel_softc *); 185 void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int); 186 void cmd_channel_map(struct pci_attach_args *, 187 struct pciide_softc *, int); 188 int cmd_pci_intr(void *); 189 void cmd646_9_irqack(struct channel_softc *); 190 191 void sii_fixup_cacheline(struct pciide_softc *, struct pci_attach_args *); 192 void sii3112_chip_map(struct pciide_softc *, struct pci_attach_args *); 193 void sii3112_setup_channel(struct channel_softc *); 194 void sii3112_drv_probe(struct channel_softc *); 195 void sii3114_chip_map(struct pciide_softc *, struct pci_attach_args *); 196 void sii3114_mapreg_dma(struct pciide_softc *, struct pci_attach_args *); 197 int sii3114_chansetup(struct pciide_softc *, int); 198 void sii3114_mapchan(struct pciide_channel *); 199 u_int8_t sii3114_dmacmd_read(struct pciide_softc *, int); 200 void sii3114_dmacmd_write(struct pciide_softc *, int, u_int8_t); 201 u_int8_t sii3114_dmactl_read(struct pciide_softc *, int); 202 void sii3114_dmactl_write(struct pciide_softc *, int, u_int8_t); 203 void sii3114_dmatbl_write(struct pciide_softc *, int, u_int32_t); 204 205 void cy693_chip_map(struct pciide_softc *, struct pci_attach_args *); 206 void cy693_setup_channel(struct channel_softc *); 207 208 void sis_chip_map(struct pciide_softc *, struct pci_attach_args *); 209 void sis_setup_channel(struct channel_softc *); 210 void sis96x_setup_channel(struct channel_softc *); 211 int sis_hostbr_match(struct pci_attach_args *); 212 int sis_south_match(struct pci_attach_args *); 213 214 void natsemi_chip_map(struct pciide_softc *, struct pci_attach_args *); 215 void natsemi_setup_channel(struct channel_softc *); 216 int natsemi_pci_intr(void *); 217 void natsemi_irqack(struct channel_softc *); 218 void ns_scx200_chip_map(struct pciide_softc *, struct pci_attach_args *); 219 void ns_scx200_setup_channel(struct channel_softc *); 220 221 void acer_chip_map(struct pciide_softc *, struct pci_attach_args *); 222 void acer_setup_channel(struct channel_softc *); 223 int acer_pci_intr(void *); 224 225 void pdc202xx_chip_map(struct pciide_softc *, struct pci_attach_args *); 226 void pdc202xx_setup_channel(struct channel_softc *); 227 void pdc20268_setup_channel(struct channel_softc *); 228 int pdc202xx_pci_intr(void *); 229 int pdc20265_pci_intr(void *); 230 void pdc20262_dma_start(void *, int, int); 231 int pdc20262_dma_finish(void *, int, int, int); 232 233 
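/*
 * Example (illustrative sketch only, not part of the driver): the
 * pciide_pci_read()/pciide_pci_write() helpers defined above are the
 * usual way to do a read-modify-write of a byte-wide register that
 * lives in PCI configuration space.  The helper name below is made up
 * for the sake of the example.
 */
#ifdef notyet
static void
pciide_pci_setbit(pci_chipset_tag_t pc, pcitag_t tag, int reg, u_int8_t bit)
{
	u_int8_t val;

	val = pciide_pci_read(pc, tag, reg);	/* fetch the current byte */
	val |= bit;				/* set the requested bit(s) */
	pciide_pci_write(pc, tag, reg, val);	/* write the byte back */
}
#endif	/* notyet */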
u_int8_t pdc268_config_read(struct channel_softc *, int); 234 235 void pdcsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 236 void pdc203xx_setup_channel(struct channel_softc *); 237 int pdc203xx_pci_intr(void *); 238 void pdc203xx_irqack(struct channel_softc *); 239 void pdc203xx_dma_start(void *,int ,int); 240 int pdc203xx_dma_finish(void *, int, int, int); 241 int pdc205xx_pci_intr(void *); 242 void pdc205xx_do_reset(struct channel_softc *); 243 void pdc205xx_drv_probe(struct channel_softc *); 244 245 void opti_chip_map(struct pciide_softc *, struct pci_attach_args *); 246 void opti_setup_channel(struct channel_softc *); 247 248 void hpt_chip_map(struct pciide_softc *, struct pci_attach_args *); 249 void hpt_setup_channel(struct channel_softc *); 250 int hpt_pci_intr(void *); 251 252 void acard_chip_map(struct pciide_softc *, struct pci_attach_args *); 253 void acard_setup_channel(struct channel_softc *); 254 255 void serverworks_chip_map(struct pciide_softc *, struct pci_attach_args *); 256 void serverworks_setup_channel(struct channel_softc *); 257 int serverworks_pci_intr(void *); 258 259 void svwsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 260 void svwsata_mapreg_dma(struct pciide_softc *, struct pci_attach_args *); 261 void svwsata_mapchan(struct pciide_channel *); 262 u_int8_t svwsata_dmacmd_read(struct pciide_softc *, int); 263 void svwsata_dmacmd_write(struct pciide_softc *, int, u_int8_t); 264 u_int8_t svwsata_dmactl_read(struct pciide_softc *, int); 265 void svwsata_dmactl_write(struct pciide_softc *, int, u_int8_t); 266 void svwsata_dmatbl_write(struct pciide_softc *, int, u_int32_t); 267 void svwsata_drv_probe(struct channel_softc *); 268 269 void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *); 270 void nforce_setup_channel(struct channel_softc *); 271 int nforce_pci_intr(void *); 272 273 void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *); 274 275 void ite_chip_map(struct pciide_softc *, struct pci_attach_args *); 276 void ite_setup_channel(struct channel_softc *); 277 278 void ixp_chip_map(struct pciide_softc *, struct pci_attach_args *); 279 void ixp_setup_channel(struct channel_softc *); 280 281 void jmicron_chip_map(struct pciide_softc *, struct pci_attach_args *); 282 void jmicron_setup_channel(struct channel_softc *); 283 284 struct pciide_product_desc { 285 u_int32_t ide_product; 286 u_short ide_flags; 287 /* map and setup chip, probe drives */ 288 void (*chip_map)(struct pciide_softc *, struct pci_attach_args *); 289 }; 290 291 /* Flags for ide_flags */ 292 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 293 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 294 295 /* Default product description for devices not known from this controller */ 296 const struct pciide_product_desc default_product_desc = { 297 0, /* Generic PCI IDE controller */ 298 0, 299 default_chip_map 300 }; 301 302 const struct pciide_product_desc pciide_intel_products[] = { 303 { PCI_PRODUCT_INTEL_31244, /* Intel 31244 SATA */ 304 0, 305 artisea_chip_map 306 }, 307 { PCI_PRODUCT_INTEL_82092AA, /* Intel 82092AA IDE */ 308 0, 309 default_chip_map 310 }, 311 { PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */ 312 0, 313 piix_chip_map 314 }, 315 { PCI_PRODUCT_INTEL_82371FB_ISA, /* Intel 82371FB IDE (PIIX) */ 316 0, 317 piix_chip_map 318 }, 319 { PCI_PRODUCT_INTEL_82372FB_IDE, /* Intel 82372FB IDE (PIIX4) */ 320 0, 321 piix_chip_map 322 }, 323 { 
PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */ 324 0, 325 piix_chip_map 326 }, 327 { PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */ 328 0, 329 piix_chip_map 330 }, 331 { PCI_PRODUCT_INTEL_82371MX, /* Intel 82371MX IDE */ 332 0, 333 piix_chip_map 334 }, 335 { PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */ 336 0, 337 piix_chip_map 338 }, 339 { PCI_PRODUCT_INTEL_82451NX, /* Intel 82451NX (PIIX4) IDE */ 340 0, 341 piix_chip_map 342 }, 343 { PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */ 344 0, 345 piix_chip_map 346 }, 347 { PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */ 348 0, 349 piix_chip_map 350 }, 351 { PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */ 352 0, 353 piix_chip_map 354 }, 355 { PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */ 356 0, 357 piix_chip_map 358 }, 359 { PCI_PRODUCT_INTEL_82801CAM_IDE, /* Intel 82801CAM IDE (ICH3) */ 360 0, 361 piix_chip_map 362 }, 363 { PCI_PRODUCT_INTEL_82801CA_IDE, /* Intel 82801CA IDE (ICH3) */ 364 0, 365 piix_chip_map 366 }, 367 { PCI_PRODUCT_INTEL_82801DB_IDE, /* Intel 82801DB IDE (ICH4) */ 368 0, 369 piix_chip_map 370 }, 371 { PCI_PRODUCT_INTEL_82801DBL_IDE, /* Intel 82801DBL IDE (ICH4-L) */ 372 0, 373 piix_chip_map 374 }, 375 { PCI_PRODUCT_INTEL_82801DBM_IDE, /* Intel 82801DBM IDE (ICH4-M) */ 376 0, 377 piix_chip_map 378 }, 379 { PCI_PRODUCT_INTEL_82801EB_IDE, /* Intel 82801EB/ER (ICH5/5R) IDE */ 380 0, 381 piix_chip_map 382 }, 383 { PCI_PRODUCT_INTEL_82801EB_SATA, /* Intel 82801EB (ICH5) SATA */ 384 0, 385 piixsata_chip_map 386 }, 387 { PCI_PRODUCT_INTEL_82801ER_SATA, /* Intel 82801ER (ICH5R) SATA */ 388 0, 389 piixsata_chip_map 390 }, 391 { PCI_PRODUCT_INTEL_6300ESB_IDE, /* Intel 6300ESB IDE */ 392 0, 393 piix_chip_map 394 }, 395 { PCI_PRODUCT_INTEL_6300ESB_SATA, /* Intel 6300ESB SATA */ 396 0, 397 piixsata_chip_map 398 }, 399 { PCI_PRODUCT_INTEL_6300ESB_SATA2, /* Intel 6300ESB SATA */ 400 0, 401 piixsata_chip_map 402 }, 403 { PCI_PRODUCT_INTEL_6321ESB_IDE, /* Intel 6321ESB IDE */ 404 0, 405 piix_chip_map 406 }, 407 { PCI_PRODUCT_INTEL_82801FB_IDE, /* Intel 82801FB (ICH6) IDE */ 408 0, 409 piix_chip_map 410 }, 411 { PCI_PRODUCT_INTEL_82801FBM_SATA, /* Intel 82801FBM (ICH6M) SATA */ 412 0, 413 piixsata_chip_map 414 }, 415 { PCI_PRODUCT_INTEL_82801FB_SATA, /* Intel 82801FB (ICH6) SATA */ 416 0, 417 piixsata_chip_map 418 }, 419 { PCI_PRODUCT_INTEL_82801FR_SATA, /* Intel 82801FR (ICH6R) SATA */ 420 0, 421 piixsata_chip_map 422 }, 423 { PCI_PRODUCT_INTEL_82801GB_IDE, /* Intel 82801GB (ICH7) IDE */ 424 0, 425 piix_chip_map 426 }, 427 { PCI_PRODUCT_INTEL_82801GB_SATA, /* Intel 82801GB (ICH7) SATA */ 428 0, 429 piixsata_chip_map 430 }, 431 { PCI_PRODUCT_INTEL_82801GR_AHCI, /* Intel 82801GR (ICH7R) AHCI */ 432 0, 433 piixsata_chip_map 434 }, 435 { PCI_PRODUCT_INTEL_82801GR_RAID, /* Intel 82801GR (ICH7R) RAID */ 436 0, 437 piixsata_chip_map 438 }, 439 { PCI_PRODUCT_INTEL_82801GBM_SATA, /* Intel 82801GBM (ICH7M) SATA */ 440 0, 441 piixsata_chip_map 442 }, 443 { PCI_PRODUCT_INTEL_82801GBM_AHCI, /* Intel 82801GBM (ICH7M) AHCI */ 444 0, 445 piixsata_chip_map 446 }, 447 { PCI_PRODUCT_INTEL_82801GHM_RAID, /* Intel 82801GHM (ICH7M DH) RAID */ 448 0, 449 piixsata_chip_map 450 }, 451 { PCI_PRODUCT_INTEL_82801H_SATA_1, /* Intel 82801H (ICH8) SATA */ 452 0, 453 piixsata_chip_map 454 }, 455 { PCI_PRODUCT_INTEL_82801H_AHCI_6P, /* Intel 82801H (ICH8) AHCI */ 456 0, 457 piixsata_chip_map 458 }, 459 { PCI_PRODUCT_INTEL_82801H_RAID, /* Intel 82801H (ICH8) RAID */ 460 0, 461 
piixsata_chip_map 462 }, 463 { PCI_PRODUCT_INTEL_82801H_AHCI_4P, /* Intel 82801H (ICH8) AHCI */ 464 0, 465 piixsata_chip_map 466 }, 467 { PCI_PRODUCT_INTEL_82801H_SATA_2, /* Intel 82801H (ICH8) SATA */ 468 0, 469 piixsata_chip_map 470 }, 471 { PCI_PRODUCT_INTEL_82801HBM_SATA, /* Intel 82801HBM (ICH8M) SATA */ 472 0, 473 piixsata_chip_map 474 }, 475 { PCI_PRODUCT_INTEL_82801HBM_AHCI, /* Intel 82801HBM (ICH8M) AHCI */ 476 0, 477 piixsata_chip_map 478 }, 479 { PCI_PRODUCT_INTEL_82801HBM_RAID, /* Intel 82801HBM (ICH8M) RAID */ 480 0, 481 piixsata_chip_map 482 }, 483 { PCI_PRODUCT_INTEL_82801HBM_IDE, /* Intel 82801HBM (ICH8M) IDE */ 484 0, 485 piix_chip_map 486 }, 487 { PCI_PRODUCT_INTEL_82801I_SATA_1, /* Intel 82801I (ICH9) SATA */ 488 0, 489 piixsata_chip_map 490 }, 491 { PCI_PRODUCT_INTEL_82801I_SATA_2, /* Intel 82801I (ICH9) SATA */ 492 0, 493 piixsata_chip_map 494 }, 495 { PCI_PRODUCT_INTEL_82801I_SATA_3, /* Intel 82801I (ICH9) SATA */ 496 0, 497 piixsata_chip_map 498 }, 499 { PCI_PRODUCT_INTEL_82801I_SATA_4, /* Intel 82801I (ICH9) SATA */ 500 0, 501 piixsata_chip_map 502 }, 503 { PCI_PRODUCT_INTEL_82801I_SATA_5, /* Intel 82801I (ICH9M) SATA */ 504 0, 505 piixsata_chip_map 506 }, 507 { PCI_PRODUCT_INTEL_82801I_SATA_6, /* Intel 82801I (ICH9M) SATA */ 508 0, 509 piixsata_chip_map 510 }, 511 { PCI_PRODUCT_INTEL_82801JD_SATA_1, /* Intel 82801JD (ICH10) SATA */ 512 0, 513 piixsata_chip_map 514 }, 515 { PCI_PRODUCT_INTEL_82801JD_SATA_2, /* Intel 82801JD (ICH10) SATA */ 516 0, 517 piixsata_chip_map 518 }, 519 { PCI_PRODUCT_INTEL_82801JI_SATA_1, /* Intel 82801JI (ICH10) SATA */ 520 0, 521 piixsata_chip_map 522 }, 523 { PCI_PRODUCT_INTEL_82801JI_SATA_2, /* Intel 82801JI (ICH10) SATA */ 524 0, 525 piixsata_chip_map 526 }, 527 { PCI_PRODUCT_INTEL_6321ESB_SATA, /* Intel 6321ESB SATA */ 528 0, 529 piixsata_chip_map 530 } 531 }; 532 533 const struct pciide_product_desc pciide_amd_products[] = { 534 { PCI_PRODUCT_AMD_PBC756_IDE, /* AMD 756 */ 535 0, 536 amd756_chip_map 537 }, 538 { PCI_PRODUCT_AMD_766_IDE, /* AMD 766 */ 539 0, 540 amd756_chip_map 541 }, 542 { PCI_PRODUCT_AMD_PBC768_IDE, 543 0, 544 amd756_chip_map 545 }, 546 { PCI_PRODUCT_AMD_8111_IDE, 547 0, 548 amd756_chip_map 549 }, 550 { PCI_PRODUCT_AMD_CS5536_IDE, 551 0, 552 amd756_chip_map 553 } 554 }; 555 556 #ifdef notyet 557 const struct pciide_product_desc pciide_opti_products[] = { 558 559 { PCI_PRODUCT_OPTI_82C621, 560 0, 561 opti_chip_map 562 }, 563 { PCI_PRODUCT_OPTI_82C568, 564 0, 565 opti_chip_map 566 }, 567 { PCI_PRODUCT_OPTI_82D568, 568 0, 569 opti_chip_map 570 } 571 }; 572 #endif 573 574 const struct pciide_product_desc pciide_cmd_products[] = { 575 { PCI_PRODUCT_CMDTECH_640, /* CMD Technology PCI0640 */ 576 0, 577 cmd_chip_map 578 }, 579 { PCI_PRODUCT_CMDTECH_643, /* CMD Technology PCI0643 */ 580 0, 581 cmd0643_9_chip_map 582 }, 583 { PCI_PRODUCT_CMDTECH_646, /* CMD Technology PCI0646 */ 584 0, 585 cmd0643_9_chip_map 586 }, 587 { PCI_PRODUCT_CMDTECH_648, /* CMD Technology PCI0648 */ 588 0, 589 cmd0643_9_chip_map 590 }, 591 { PCI_PRODUCT_CMDTECH_649, /* CMD Technology PCI0649 */ 592 0, 593 cmd0643_9_chip_map 594 }, 595 { PCI_PRODUCT_CMDTECH_680, /* CMD Technology PCI0680 */ 596 IDE_PCI_CLASS_OVERRIDE, 597 cmd680_chip_map 598 }, 599 { PCI_PRODUCT_CMDTECH_3112, /* SiI3112 SATA */ 600 0, 601 sii3112_chip_map 602 }, 603 { PCI_PRODUCT_CMDTECH_3512, /* SiI3512 SATA */ 604 0, 605 sii3112_chip_map 606 }, 607 { PCI_PRODUCT_CMDTECH_AAR_1210SA, /* Adaptec AAR-1210SA */ 608 0, 609 sii3112_chip_map 610 }, 611 { PCI_PRODUCT_CMDTECH_3114, /* SiI3114 
SATA */ 612 0, 613 sii3114_chip_map 614 } 615 }; 616 617 const struct pciide_product_desc pciide_via_products[] = { 618 { PCI_PRODUCT_VIATECH_VT82C416, /* VIA VT82C416 IDE */ 619 0, 620 apollo_chip_map 621 }, 622 { PCI_PRODUCT_VIATECH_VT82C571, /* VIA VT82C571 IDE */ 623 0, 624 apollo_chip_map 625 }, 626 { PCI_PRODUCT_VIATECH_VT6410, /* VIA VT6410 IDE */ 627 IDE_PCI_CLASS_OVERRIDE, 628 apollo_chip_map 629 }, 630 { PCI_PRODUCT_VIATECH_CX700_IDE, /* VIA CX700 IDE */ 631 0, 632 apollo_chip_map 633 }, 634 { PCI_PRODUCT_VIATECH_VX700_IDE, /* VIA VX700 IDE */ 635 0, 636 apollo_chip_map 637 }, 638 { PCI_PRODUCT_VIATECH_VT6420_SATA, /* VIA VT6420 SATA */ 639 0, 640 sata_chip_map 641 }, 642 { PCI_PRODUCT_VIATECH_VT6421_SATA, /* VIA VT6421 SATA */ 643 0, 644 sata_chip_map 645 }, 646 { PCI_PRODUCT_VIATECH_VT8237A_SATA, /* VIA VT8237A SATA */ 647 0, 648 sata_chip_map 649 }, 650 { PCI_PRODUCT_VIATECH_VT8237A_SATA_2, /* VIA VT8237A SATA */ 651 0, 652 sata_chip_map 653 }, 654 { PCI_PRODUCT_VIATECH_VT8237S_SATA, /* VIA VT8237S SATA */ 655 0, 656 sata_chip_map 657 }, 658 { PCI_PRODUCT_VIATECH_VT8251_SATA, /* VIA VT8251 SATA */ 659 0, 660 sata_chip_map 661 } 662 }; 663 664 const struct pciide_product_desc pciide_cypress_products[] = { 665 { PCI_PRODUCT_CONTAQ_82C693, /* Contaq CY82C693 IDE */ 666 IDE_16BIT_IOSPACE, 667 cy693_chip_map 668 } 669 }; 670 671 const struct pciide_product_desc pciide_sis_products[] = { 672 { PCI_PRODUCT_SIS_5513, /* SIS 5513 EIDE */ 673 0, 674 sis_chip_map 675 }, 676 { PCI_PRODUCT_SIS_180, /* SIS 180 SATA */ 677 0, 678 sata_chip_map 679 }, 680 { PCI_PRODUCT_SIS_181, /* SIS 181 SATA */ 681 0, 682 sata_chip_map 683 }, 684 { PCI_PRODUCT_SIS_182, /* SIS 182 SATA */ 685 0, 686 sata_chip_map 687 } 688 }; 689 690 /* 691 * The National/AMD CS5535 requires MSRs to set DMA/PIO modes so it 692 * has been banished to the MD i386 pciide_machdep 693 */ 694 const struct pciide_product_desc pciide_natsemi_products[] = { 695 #ifdef __i386__ 696 { PCI_PRODUCT_NS_CS5535_IDE, /* National/AMD CS5535 IDE */ 697 0, 698 gcsc_chip_map 699 }, 700 #endif 701 { PCI_PRODUCT_NS_PC87415, /* National Semi PC87415 IDE */ 702 0, 703 natsemi_chip_map 704 }, 705 { PCI_PRODUCT_NS_SCx200_IDE, /* National Semi SCx200 IDE */ 706 0, 707 ns_scx200_chip_map 708 } 709 }; 710 711 const struct pciide_product_desc pciide_acer_products[] = { 712 { PCI_PRODUCT_ALI_M5229, /* Acer Labs M5229 UDMA IDE */ 713 0, 714 acer_chip_map 715 } 716 }; 717 718 const struct pciide_product_desc pciide_triones_products[] = { 719 { PCI_PRODUCT_TRIONES_HPT366, /* Highpoint HPT36x/37x IDE */ 720 IDE_PCI_CLASS_OVERRIDE, 721 hpt_chip_map, 722 }, 723 { PCI_PRODUCT_TRIONES_HPT372A, /* Highpoint HPT372A IDE */ 724 IDE_PCI_CLASS_OVERRIDE, 725 hpt_chip_map 726 }, 727 { PCI_PRODUCT_TRIONES_HPT302, /* Highpoint HPT302 IDE */ 728 IDE_PCI_CLASS_OVERRIDE, 729 hpt_chip_map 730 }, 731 { PCI_PRODUCT_TRIONES_HPT371, /* Highpoint HPT371 IDE */ 732 IDE_PCI_CLASS_OVERRIDE, 733 hpt_chip_map 734 }, 735 { PCI_PRODUCT_TRIONES_HPT374, /* Highpoint HPT374 IDE */ 736 IDE_PCI_CLASS_OVERRIDE, 737 hpt_chip_map 738 } 739 }; 740 741 const struct pciide_product_desc pciide_promise_products[] = { 742 { PCI_PRODUCT_PROMISE_PDC20246, 743 IDE_PCI_CLASS_OVERRIDE, 744 pdc202xx_chip_map, 745 }, 746 { PCI_PRODUCT_PROMISE_PDC20262, 747 IDE_PCI_CLASS_OVERRIDE, 748 pdc202xx_chip_map, 749 }, 750 { PCI_PRODUCT_PROMISE_PDC20265, 751 IDE_PCI_CLASS_OVERRIDE, 752 pdc202xx_chip_map, 753 }, 754 { PCI_PRODUCT_PROMISE_PDC20267, 755 IDE_PCI_CLASS_OVERRIDE, 756 pdc202xx_chip_map, 757 }, 758 { 
PCI_PRODUCT_PROMISE_PDC20268, 759 IDE_PCI_CLASS_OVERRIDE, 760 pdc202xx_chip_map, 761 }, 762 { PCI_PRODUCT_PROMISE_PDC20268R, 763 IDE_PCI_CLASS_OVERRIDE, 764 pdc202xx_chip_map, 765 }, 766 { PCI_PRODUCT_PROMISE_PDC20269, 767 IDE_PCI_CLASS_OVERRIDE, 768 pdc202xx_chip_map, 769 }, 770 { PCI_PRODUCT_PROMISE_PDC20271, 771 IDE_PCI_CLASS_OVERRIDE, 772 pdc202xx_chip_map, 773 }, 774 { PCI_PRODUCT_PROMISE_PDC20275, 775 IDE_PCI_CLASS_OVERRIDE, 776 pdc202xx_chip_map, 777 }, 778 { PCI_PRODUCT_PROMISE_PDC20276, 779 IDE_PCI_CLASS_OVERRIDE, 780 pdc202xx_chip_map, 781 }, 782 { PCI_PRODUCT_PROMISE_PDC20277, 783 IDE_PCI_CLASS_OVERRIDE, 784 pdc202xx_chip_map, 785 }, 786 { PCI_PRODUCT_PROMISE_PDC20318, 787 IDE_PCI_CLASS_OVERRIDE, 788 pdcsata_chip_map, 789 }, 790 { PCI_PRODUCT_PROMISE_PDC20319, 791 IDE_PCI_CLASS_OVERRIDE, 792 pdcsata_chip_map, 793 }, 794 { PCI_PRODUCT_PROMISE_PDC20371, 795 IDE_PCI_CLASS_OVERRIDE, 796 pdcsata_chip_map, 797 }, 798 { PCI_PRODUCT_PROMISE_PDC20375, 799 IDE_PCI_CLASS_OVERRIDE, 800 pdcsata_chip_map, 801 }, 802 { PCI_PRODUCT_PROMISE_PDC20376, 803 IDE_PCI_CLASS_OVERRIDE, 804 pdcsata_chip_map, 805 }, 806 { PCI_PRODUCT_PROMISE_PDC20377, 807 IDE_PCI_CLASS_OVERRIDE, 808 pdcsata_chip_map, 809 }, 810 { PCI_PRODUCT_PROMISE_PDC20378, 811 IDE_PCI_CLASS_OVERRIDE, 812 pdcsata_chip_map, 813 }, 814 { PCI_PRODUCT_PROMISE_PDC20379, 815 IDE_PCI_CLASS_OVERRIDE, 816 pdcsata_chip_map, 817 }, 818 { PCI_PRODUCT_PROMISE_PDC40518, 819 IDE_PCI_CLASS_OVERRIDE, 820 pdcsata_chip_map, 821 }, 822 { PCI_PRODUCT_PROMISE_PDC40519, 823 IDE_PCI_CLASS_OVERRIDE, 824 pdcsata_chip_map, 825 }, 826 { PCI_PRODUCT_PROMISE_PDC40718, 827 IDE_PCI_CLASS_OVERRIDE, 828 pdcsata_chip_map, 829 }, 830 { PCI_PRODUCT_PROMISE_PDC40719, 831 IDE_PCI_CLASS_OVERRIDE, 832 pdcsata_chip_map, 833 }, 834 { PCI_PRODUCT_PROMISE_PDC40779, 835 IDE_PCI_CLASS_OVERRIDE, 836 pdcsata_chip_map, 837 }, 838 { PCI_PRODUCT_PROMISE_PDC20571, 839 IDE_PCI_CLASS_OVERRIDE, 840 pdcsata_chip_map, 841 }, 842 { PCI_PRODUCT_PROMISE_PDC20575, 843 IDE_PCI_CLASS_OVERRIDE, 844 pdcsata_chip_map, 845 }, 846 { PCI_PRODUCT_PROMISE_PDC20579, 847 IDE_PCI_CLASS_OVERRIDE, 848 pdcsata_chip_map, 849 }, 850 { PCI_PRODUCT_PROMISE_PDC20771, 851 IDE_PCI_CLASS_OVERRIDE, 852 pdcsata_chip_map, 853 }, 854 { PCI_PRODUCT_PROMISE_PDC20775, 855 IDE_PCI_CLASS_OVERRIDE, 856 pdcsata_chip_map, 857 } 858 }; 859 860 const struct pciide_product_desc pciide_acard_products[] = { 861 { PCI_PRODUCT_ACARD_ATP850U, /* Acard ATP850U Ultra33 Controller */ 862 IDE_PCI_CLASS_OVERRIDE, 863 acard_chip_map, 864 }, 865 { PCI_PRODUCT_ACARD_ATP860, /* Acard ATP860 Ultra66 Controller */ 866 IDE_PCI_CLASS_OVERRIDE, 867 acard_chip_map, 868 }, 869 { PCI_PRODUCT_ACARD_ATP860A, /* Acard ATP860-A Ultra66 Controller */ 870 IDE_PCI_CLASS_OVERRIDE, 871 acard_chip_map, 872 }, 873 { PCI_PRODUCT_ACARD_ATP865A, /* Acard ATP865-A Ultra133 Controller */ 874 IDE_PCI_CLASS_OVERRIDE, 875 acard_chip_map, 876 }, 877 { PCI_PRODUCT_ACARD_ATP865R, /* Acard ATP865-R Ultra133 Controller */ 878 IDE_PCI_CLASS_OVERRIDE, 879 acard_chip_map, 880 } 881 }; 882 883 const struct pciide_product_desc pciide_serverworks_products[] = { 884 { PCI_PRODUCT_RCC_OSB4_IDE, 885 0, 886 serverworks_chip_map, 887 }, 888 { PCI_PRODUCT_RCC_CSB5_IDE, 889 0, 890 serverworks_chip_map, 891 }, 892 { PCI_PRODUCT_RCC_CSB6_IDE, 893 0, 894 serverworks_chip_map, 895 }, 896 { PCI_PRODUCT_RCC_CSB6_RAID_IDE, 897 0, 898 serverworks_chip_map, 899 }, 900 { PCI_PRODUCT_RCC_HT_1000_IDE, 901 0, 902 serverworks_chip_map, 903 }, 904 { PCI_PRODUCT_RCC_K2_SATA, 905 0, 906 svwsata_chip_map, 907 
}, 908 { PCI_PRODUCT_RCC_FRODO4_SATA, 909 0, 910 svwsata_chip_map, 911 }, 912 { PCI_PRODUCT_RCC_FRODO8_SATA, 913 0, 914 svwsata_chip_map, 915 }, 916 { PCI_PRODUCT_RCC_HT_1000_SATA_1, 917 0, 918 svwsata_chip_map, 919 }, 920 { PCI_PRODUCT_RCC_HT_1000_SATA_2, 921 0, 922 svwsata_chip_map, 923 } 924 }; 925 926 const struct pciide_product_desc pciide_nvidia_products[] = { 927 { PCI_PRODUCT_NVIDIA_NFORCE_IDE, 928 0, 929 nforce_chip_map 930 }, 931 { PCI_PRODUCT_NVIDIA_NFORCE2_IDE, 932 0, 933 nforce_chip_map 934 }, 935 { PCI_PRODUCT_NVIDIA_NFORCE2_400_IDE, 936 0, 937 nforce_chip_map 938 }, 939 { PCI_PRODUCT_NVIDIA_NFORCE3_IDE, 940 0, 941 nforce_chip_map 942 }, 943 { PCI_PRODUCT_NVIDIA_NFORCE3_250_IDE, 944 0, 945 nforce_chip_map 946 }, 947 { PCI_PRODUCT_NVIDIA_NFORCE4_ATA133, 948 0, 949 nforce_chip_map 950 }, 951 { PCI_PRODUCT_NVIDIA_MCP04_IDE, 952 0, 953 nforce_chip_map 954 }, 955 { PCI_PRODUCT_NVIDIA_MCP51_IDE, 956 0, 957 nforce_chip_map 958 }, 959 { PCI_PRODUCT_NVIDIA_MCP55_IDE, 960 0, 961 nforce_chip_map 962 }, 963 { PCI_PRODUCT_NVIDIA_MCP61_IDE, 964 0, 965 nforce_chip_map 966 }, 967 { PCI_PRODUCT_NVIDIA_MCP65_IDE, 968 0, 969 nforce_chip_map 970 }, 971 { PCI_PRODUCT_NVIDIA_MCP67_IDE, 972 0, 973 nforce_chip_map 974 }, 975 { PCI_PRODUCT_NVIDIA_MCP73_IDE, 976 0, 977 nforce_chip_map 978 }, 979 { PCI_PRODUCT_NVIDIA_MCP77_IDE, 980 0, 981 nforce_chip_map 982 }, 983 { PCI_PRODUCT_NVIDIA_NFORCE2_400_SATA, 984 0, 985 sata_chip_map 986 }, 987 { PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA, 988 0, 989 sata_chip_map 990 }, 991 { PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA2, 992 0, 993 sata_chip_map 994 }, 995 { PCI_PRODUCT_NVIDIA_NFORCE4_SATA1, 996 0, 997 sata_chip_map 998 }, 999 { PCI_PRODUCT_NVIDIA_NFORCE4_SATA2, 1000 0, 1001 sata_chip_map 1002 }, 1003 { PCI_PRODUCT_NVIDIA_MCP04_SATA, 1004 0, 1005 sata_chip_map 1006 }, 1007 { PCI_PRODUCT_NVIDIA_MCP04_SATA2, 1008 0, 1009 sata_chip_map 1010 }, 1011 { PCI_PRODUCT_NVIDIA_MCP51_SATA, 1012 0, 1013 sata_chip_map 1014 }, 1015 { PCI_PRODUCT_NVIDIA_MCP51_SATA2, 1016 0, 1017 sata_chip_map 1018 }, 1019 { PCI_PRODUCT_NVIDIA_MCP55_SATA, 1020 0, 1021 sata_chip_map 1022 }, 1023 { PCI_PRODUCT_NVIDIA_MCP55_SATA2, 1024 0, 1025 sata_chip_map 1026 }, 1027 { PCI_PRODUCT_NVIDIA_MCP61_SATA, 1028 0, 1029 sata_chip_map 1030 }, 1031 { PCI_PRODUCT_NVIDIA_MCP61_SATA2, 1032 0, 1033 sata_chip_map 1034 }, 1035 { PCI_PRODUCT_NVIDIA_MCP61_SATA3, 1036 0, 1037 sata_chip_map 1038 }, 1039 { PCI_PRODUCT_NVIDIA_MCP65_SATA, 1040 0, 1041 sata_chip_map 1042 }, 1043 { PCI_PRODUCT_NVIDIA_MCP65_SATA2, 1044 0, 1045 sata_chip_map 1046 }, 1047 { PCI_PRODUCT_NVIDIA_MCP65_SATA3, 1048 0, 1049 sata_chip_map 1050 }, 1051 { PCI_PRODUCT_NVIDIA_MCP65_SATA4, 1052 0, 1053 sata_chip_map 1054 }, 1055 { PCI_PRODUCT_NVIDIA_MCP67_SATA, 1056 0, 1057 sata_chip_map 1058 }, 1059 { PCI_PRODUCT_NVIDIA_MCP67_SATA2, 1060 0, 1061 sata_chip_map 1062 }, 1063 { PCI_PRODUCT_NVIDIA_MCP67_SATA3, 1064 0, 1065 sata_chip_map 1066 }, 1067 { PCI_PRODUCT_NVIDIA_MCP67_SATA4, 1068 0, 1069 sata_chip_map 1070 }, 1071 { PCI_PRODUCT_NVIDIA_MCP79_SATA_1, 1072 0, 1073 sata_chip_map 1074 }, 1075 { PCI_PRODUCT_NVIDIA_MCP79_SATA_2, 1076 0, 1077 sata_chip_map 1078 }, 1079 { PCI_PRODUCT_NVIDIA_MCP79_SATA_3, 1080 0, 1081 sata_chip_map 1082 }, 1083 { PCI_PRODUCT_NVIDIA_MCP79_SATA_4, 1084 0, 1085 sata_chip_map 1086 } 1087 }; 1088 1089 const struct pciide_product_desc pciide_ite_products[] = { 1090 { PCI_PRODUCT_ITEXPRESS_IT8211F, 1091 IDE_PCI_CLASS_OVERRIDE, 1092 ite_chip_map 1093 }, 1094 { PCI_PRODUCT_ITEXPRESS_IT8212F, 1095 IDE_PCI_CLASS_OVERRIDE, 1096 ite_chip_map 1097 
} 1098 }; 1099 1100 const struct pciide_product_desc pciide_ati_products[] = { 1101 { PCI_PRODUCT_ATI_SB200_IDE, 1102 0, 1103 ixp_chip_map 1104 }, 1105 { PCI_PRODUCT_ATI_SB300_IDE, 1106 0, 1107 ixp_chip_map 1108 }, 1109 { PCI_PRODUCT_ATI_SB400_IDE, 1110 0, 1111 ixp_chip_map 1112 }, 1113 { PCI_PRODUCT_ATI_SB600_IDE, 1114 0, 1115 ixp_chip_map 1116 }, 1117 { PCI_PRODUCT_ATI_SB700_IDE, 1118 0, 1119 ixp_chip_map 1120 }, 1121 { PCI_PRODUCT_ATI_SB300_SATA, 1122 0, 1123 sii3112_chip_map 1124 }, 1125 { PCI_PRODUCT_ATI_SB400_SATA_1, 1126 0, 1127 sii3112_chip_map 1128 }, 1129 { PCI_PRODUCT_ATI_SB400_SATA_2, 1130 0, 1131 sii3112_chip_map 1132 } 1133 }; 1134 1135 const struct pciide_product_desc pciide_jmicron_products[] = { 1136 { PCI_PRODUCT_JMICRON_JMB361, 1137 0, 1138 jmicron_chip_map 1139 }, 1140 { PCI_PRODUCT_JMICRON_JMB363, 1141 0, 1142 jmicron_chip_map 1143 }, 1144 { PCI_PRODUCT_JMICRON_JMB365, 1145 0, 1146 jmicron_chip_map 1147 }, 1148 { PCI_PRODUCT_JMICRON_JMB366, 1149 0, 1150 jmicron_chip_map 1151 }, 1152 { PCI_PRODUCT_JMICRON_JMB368, 1153 0, 1154 jmicron_chip_map 1155 } 1156 }; 1157 1158 struct pciide_vendor_desc { 1159 u_int32_t ide_vendor; 1160 const struct pciide_product_desc *ide_products; 1161 int ide_nproducts; 1162 }; 1163 1164 const struct pciide_vendor_desc pciide_vendors[] = { 1165 { PCI_VENDOR_INTEL, pciide_intel_products, 1166 sizeof(pciide_intel_products)/sizeof(pciide_intel_products[0]) }, 1167 { PCI_VENDOR_AMD, pciide_amd_products, 1168 sizeof(pciide_amd_products)/sizeof(pciide_amd_products[0]) }, 1169 #ifdef notyet 1170 { PCI_VENDOR_OPTI, pciide_opti_products, 1171 sizeof(pciide_opti_products)/sizeof(pciide_opti_products[0]) }, 1172 #endif 1173 { PCI_VENDOR_CMDTECH, pciide_cmd_products, 1174 sizeof(pciide_cmd_products)/sizeof(pciide_cmd_products[0]) }, 1175 { PCI_VENDOR_VIATECH, pciide_via_products, 1176 sizeof(pciide_via_products)/sizeof(pciide_via_products[0]) }, 1177 { PCI_VENDOR_CONTAQ, pciide_cypress_products, 1178 sizeof(pciide_cypress_products)/sizeof(pciide_cypress_products[0]) }, 1179 { PCI_VENDOR_SIS, pciide_sis_products, 1180 sizeof(pciide_sis_products)/sizeof(pciide_sis_products[0]) }, 1181 { PCI_VENDOR_NS, pciide_natsemi_products, 1182 sizeof(pciide_natsemi_products)/sizeof(pciide_natsemi_products[0]) }, 1183 { PCI_VENDOR_ALI, pciide_acer_products, 1184 sizeof(pciide_acer_products)/sizeof(pciide_acer_products[0]) }, 1185 { PCI_VENDOR_TRIONES, pciide_triones_products, 1186 sizeof(pciide_triones_products)/sizeof(pciide_triones_products[0]) }, 1187 { PCI_VENDOR_ACARD, pciide_acard_products, 1188 sizeof(pciide_acard_products)/sizeof(pciide_acard_products[0]) }, 1189 { PCI_VENDOR_RCC, pciide_serverworks_products, 1190 sizeof(pciide_serverworks_products)/sizeof(pciide_serverworks_products[0]) }, 1191 { PCI_VENDOR_PROMISE, pciide_promise_products, 1192 sizeof(pciide_promise_products)/sizeof(pciide_promise_products[0]) }, 1193 { PCI_VENDOR_NVIDIA, pciide_nvidia_products, 1194 sizeof(pciide_nvidia_products)/sizeof(pciide_nvidia_products[0]) }, 1195 { PCI_VENDOR_ITEXPRESS, pciide_ite_products, 1196 sizeof(pciide_ite_products)/sizeof(pciide_ite_products[0]) }, 1197 { PCI_VENDOR_ATI, pciide_ati_products, 1198 sizeof(pciide_ati_products)/sizeof(pciide_ati_products[0]) }, 1199 { PCI_VENDOR_JMICRON, pciide_jmicron_products, 1200 sizeof(pciide_jmicron_products)/sizeof(pciide_jmicron_products[0]) } 1201 }; 1202 1203 /* options passed via the 'flags' config keyword */ 1204 #define PCIIDE_OPTIONS_DMA 0x01 1205 1206 int pciide_match(struct device *, void *, void *); 1207 void 
pciide_attach(struct device *, struct device *, void *);

struct cfattach pciide_pci_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};

struct cfattach pciide_jmb_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};

struct cfdriver pciide_cd = {
	NULL, "pciide", DV_DULL
};

const struct pciide_product_desc *pciide_lookup_product(u_int32_t);

const struct pciide_product_desc *
pciide_lookup_product(u_int32_t id)
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;
	int i;

	for (i = 0, vp = pciide_vendors;
	    i < sizeof(pciide_vendors)/sizeof(pciide_vendors[0]);
	    vp++, i++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if (i == sizeof(pciide_vendors)/sizeof(pciide_vendors[0]))
		return (NULL);

	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (i == vp->ide_nproducts)
		return (NULL);
	return (pp);
}

int
pciide_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Some IDE controllers have severe bugs when used in PCI mode.
	 * We punt and attach them to the ISA bus instead.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
		return (0);

	/*
	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
	 * controllers. Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE))
		return (1);

	/*
	 * Check the class code to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE) {
		switch (PCI_SUBCLASS(pa->pa_class)) {
		case PCI_SUBCLASS_MASS_STORAGE_IDE:
			return (1);

		/*
		 * We only match these if we know they have
		 * a match, as we may not support native interfaces
		 * on them.
		 */
		case PCI_SUBCLASS_MASS_STORAGE_SATA:
		case PCI_SUBCLASS_MASS_STORAGE_RAID:
		case PCI_SUBCLASS_MASS_STORAGE_MISC:
			if (pp)
				return (1);
			else
				return (0);
			break;
		}
	}

	return (0);
}

void
pciide_attach(struct device *parent, struct device *self, void *aux)
{
	struct pciide_softc *sc = (struct pciide_softc *)self;
	struct pci_attach_args *pa = aux;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL)
		sc->sc_pp = &default_product_desc;
	sc->sc_rev = PCI_REVISION(pa->pa_class);

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Set up DMA defaults; these might be adjusted by chip_map.
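	 *
	 * The defaults follow the bus-master IDE PRD (physical region
	 * descriptor) limits: one descriptor transfers at most 64KB and
	 * must not describe a region that crosses a 64KB boundary (see
	 * the per-segment check in pciide_dma_init() below).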
	 */
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;

	sc->sc_dmacmd_read = pciide_dmacmd_read;
	sc->sc_dmacmd_write = pciide_dmacmd_write;
	sc->sc_dmactl_read = pciide_dmactl_read;
	sc->sc_dmactl_write = pciide_dmactl_write;
	sc->sc_dmatbl_write = pciide_dmatbl_write;

	WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc,
	    sc->sc_tag, pa->pa_class), DEBUG_PROBE);

	sc->sc_pp->chip_map(sc, pa);

	WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG)),
	    DEBUG_PROBE);
}

int
pciide_mapregs_compat(struct pci_attach_args *pa, struct pciide_channel *cp,
    int compatchan, bus_size_t *cmdsizep, bus_size_t *ctlsizep)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	pcireg_t csr;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MASTER_ENABLE);

	wdc_cp->cmd_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}

	return (1);
}

int
pciide_mapregs_native(struct pci_attach_args *pa, struct pciide_channel *cp,
    bus_size_t *cmdsizep, bus_size_t *ctlsizep, int (*pci_intr)(void *))
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;
	pcireg_t maptype;

	cp->compat = 0;

	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return (0);
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc,
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return (0);
		}
	}
	cp->ih = sc->sc_pci_ih;

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CMD_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ?
	    "I/O" : "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CTL_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O": "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return (0);
	}
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register; the control register is at offset 2. Pass the generic
	 * code a handle for only one byte at the right offset.
	 */
	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdc_cp->ctl_ioh) != 0) {
		printf("%s: unable to subregion %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
		return (0);
	}
	return (1);
}

void
pciide_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done. If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
	 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space", some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
1471 */ 1472 1473 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 1474 PCIIDE_REG_BUS_MASTER_DMA); 1475 1476 switch (maptype) { 1477 case PCI_MAPREG_TYPE_IO: 1478 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 1479 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 1480 &addr, NULL, NULL) == 0); 1481 if (sc->sc_dma_ok == 0) { 1482 printf(", unused (couldn't query registers)"); 1483 break; 1484 } 1485 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 1486 && addr >= 0x10000) { 1487 sc->sc_dma_ok = 0; 1488 printf(", unused (registers at unsafe address %#lx)", addr); 1489 break; 1490 } 1491 /* FALLTHROUGH */ 1492 1493 case PCI_MAPREG_MEM_TYPE_32BIT: 1494 sc->sc_dma_ok = (pci_mapreg_map(pa, 1495 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 1496 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL, 0) == 0); 1497 sc->sc_dmat = pa->pa_dmat; 1498 if (sc->sc_dma_ok == 0) { 1499 printf(", unused (couldn't map registers)"); 1500 } else { 1501 sc->sc_wdcdev.dma_arg = sc; 1502 sc->sc_wdcdev.dma_init = pciide_dma_init; 1503 sc->sc_wdcdev.dma_start = pciide_dma_start; 1504 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 1505 } 1506 break; 1507 1508 default: 1509 sc->sc_dma_ok = 0; 1510 printf(", (unsupported maptype 0x%x)", maptype); 1511 break; 1512 } 1513 } 1514 1515 int 1516 pciide_intr_flag(struct pciide_channel *cp) 1517 { 1518 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1519 int chan = cp->wdc_channel.channel; 1520 1521 if (cp->dma_in_progress) { 1522 int retry = 10; 1523 int status; 1524 1525 /* Check the status register */ 1526 for (retry = 10; retry > 0; retry--) { 1527 status = PCIIDE_DMACTL_READ(sc, chan); 1528 if (status & IDEDMA_CTL_INTR) { 1529 break; 1530 } 1531 DELAY(5); 1532 } 1533 1534 /* Not for us. */ 1535 if (retry == 0) 1536 return (0); 1537 1538 return (1); 1539 } 1540 1541 return (-1); 1542 } 1543 1544 int 1545 pciide_compat_intr(void *arg) 1546 { 1547 struct pciide_channel *cp = arg; 1548 1549 if (pciide_intr_flag(cp) == 0) 1550 return (0); 1551 1552 #ifdef DIAGNOSTIC 1553 /* should only be called for a compat channel */ 1554 if (cp->compat == 0) 1555 panic("pciide compat intr called for non-compat chan %p", cp); 1556 #endif 1557 return (wdcintr(&cp->wdc_channel)); 1558 } 1559 1560 int 1561 pciide_pci_intr(void *arg) 1562 { 1563 struct pciide_softc *sc = arg; 1564 struct pciide_channel *cp; 1565 struct channel_softc *wdc_cp; 1566 int i, rv, crv; 1567 1568 rv = 0; 1569 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 1570 cp = &sc->pciide_channels[i]; 1571 wdc_cp = &cp->wdc_channel; 1572 1573 /* If a compat channel skip. 
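		 * Compat channels are serviced by their own handler,
		 * pciide_compat_intr(), established by
		 * pciide_map_compat_intr(), so they must not be handled
		 * from the native-PCI interrupt.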
		 */
		if (cp->compat)
			continue;

		if (pciide_intr_flag(cp) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			; /* leave rv alone */
		else if (crv == 1)
			rv = 1; /* claim the intr */
		else if (rv == 0) /* crv should be -1 in this case */
			rv = crv; /* if we've done no better, take it */
	}
	return (rv);
}

u_int8_t
pciide_dmacmd_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan)));
}

void
pciide_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan), val);
}

u_int8_t
pciide_dmactl_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan)));
}

void
pciide_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan), val);
}

void
pciide_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val)
{
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL(chan), val);
}

void
pciide_channel_dma_setup(struct pciide_channel *cp)
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

int
pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive)
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return (0);

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
	    seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat,
dma_table_size, 1694 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 1695 &dma_maps->dmamap_table)) != 0) { 1696 printf("%s:%d: unable to create table DMA map for " 1697 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1698 channel, drive, error); 1699 return (error); 1700 } 1701 if ((error = bus_dmamap_load(sc->sc_dmat, 1702 dma_maps->dmamap_table, 1703 dma_maps->dma_table, 1704 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 1705 printf("%s:%d: unable to load table DMA map for " 1706 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1707 channel, drive, error); 1708 return (error); 1709 } 1710 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 1711 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE); 1712 /* Create a xfer DMA map for this drive */ 1713 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 1714 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary, 1715 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1716 &dma_maps->dmamap_xfer)) != 0) { 1717 printf("%s:%d: unable to create xfer DMA map for " 1718 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1719 channel, drive, error); 1720 return (error); 1721 } 1722 return (0); 1723 } 1724 1725 int 1726 pciide_dma_init(void *v, int channel, int drive, void *databuf, 1727 size_t datalen, int flags) 1728 { 1729 struct pciide_softc *sc = v; 1730 int error, seg; 1731 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1732 struct pciide_dma_maps *dma_maps = 1733 &sc->pciide_channels[channel].dma_maps[drive]; 1734 #ifndef BUS_DMA_RAW 1735 #define BUS_DMA_RAW 0 1736 #endif 1737 1738 error = bus_dmamap_load(sc->sc_dmat, 1739 dma_maps->dmamap_xfer, 1740 databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW); 1741 if (error) { 1742 printf("%s:%d: unable to load xfer DMA map for " 1743 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1744 channel, drive, error); 1745 return (error); 1746 } 1747 1748 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1749 dma_maps->dmamap_xfer->dm_mapsize, 1750 (flags & WDC_DMA_READ) ? 1751 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1752 1753 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 1754 #ifdef DIAGNOSTIC 1755 /* A segment must not cross a 64k boundary */ 1756 { 1757 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 1758 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 1759 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 1760 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 1761 printf("pciide_dma: segment %d physical addr 0x%lx" 1762 " len 0x%lx not properly aligned\n", 1763 seg, phys, len); 1764 panic("pciide_dma: buf align"); 1765 } 1766 } 1767 #endif 1768 dma_maps->dma_table[seg].base_addr = 1769 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 1770 dma_maps->dma_table[seg].byte_count = 1771 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 1772 IDEDMA_BYTE_COUNT_MASK); 1773 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 1774 seg, letoh32(dma_maps->dma_table[seg].byte_count), 1775 letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 1776 1777 } 1778 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 1779 htole32(IDEDMA_BYTE_COUNT_EOT); 1780 1781 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 1782 dma_maps->dmamap_table->dm_mapsize, 1783 BUS_DMASYNC_PREWRITE); 1784 1785 /* Maps are ready. 
Start DMA function */ 1786 #ifdef DIAGNOSTIC 1787 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 1788 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 1789 dma_maps->dmamap_table->dm_segs[0].ds_addr); 1790 panic("pciide_dma_init: table align"); 1791 } 1792 #endif 1793 1794 /* Clear status bits */ 1795 PCIIDE_DMACTL_WRITE(sc, channel, PCIIDE_DMACTL_READ(sc, channel)); 1796 /* Write table addr */ 1797 PCIIDE_DMATBL_WRITE(sc, channel, 1798 dma_maps->dmamap_table->dm_segs[0].ds_addr); 1799 /* set read/write */ 1800 PCIIDE_DMACMD_WRITE(sc, channel, 1801 ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd); 1802 /* remember flags */ 1803 dma_maps->dma_flags = flags; 1804 return (0); 1805 } 1806 1807 void 1808 pciide_dma_start(void *v, int channel, int drive) 1809 { 1810 struct pciide_softc *sc = v; 1811 1812 WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS); 1813 PCIIDE_DMACMD_WRITE(sc, channel, PCIIDE_DMACMD_READ(sc, channel) | 1814 IDEDMA_CMD_START); 1815 1816 sc->pciide_channels[channel].dma_in_progress = 1; 1817 } 1818 1819 int 1820 pciide_dma_finish(void *v, int channel, int drive, int force) 1821 { 1822 struct pciide_softc *sc = v; 1823 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1824 u_int8_t status; 1825 int error = 0; 1826 struct pciide_dma_maps *dma_maps = 1827 &sc->pciide_channels[channel].dma_maps[drive]; 1828 1829 status = PCIIDE_DMACTL_READ(sc, channel); 1830 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 1831 DEBUG_XFERS); 1832 1833 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) { 1834 error = WDC_DMAST_NOIRQ; 1835 goto done; 1836 } 1837 1838 /* stop DMA channel */ 1839 PCIIDE_DMACMD_WRITE(sc, channel, 1840 ((dma_maps->dma_flags & WDC_DMA_READ) ? 1841 0x00 : IDEDMA_CMD_WRITE) | cp->idedma_cmd); 1842 1843 /* Unload the map of the data buffer */ 1844 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1845 dma_maps->dmamap_xfer->dm_mapsize, 1846 (dma_maps->dma_flags & WDC_DMA_READ) ? 
1847 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1848 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 1849 1850 /* Clear status bits */ 1851 PCIIDE_DMACTL_WRITE(sc, channel, status); 1852 1853 if ((status & IDEDMA_CTL_ERR) != 0) { 1854 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 1855 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 1856 error |= WDC_DMAST_ERR; 1857 } 1858 1859 if ((status & IDEDMA_CTL_INTR) == 0) { 1860 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 1861 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 1862 drive, status); 1863 error |= WDC_DMAST_NOIRQ; 1864 } 1865 1866 if ((status & IDEDMA_CTL_ACT) != 0) { 1867 /* data underrun, may be a valid condition for ATAPI */ 1868 error |= WDC_DMAST_UNDER; 1869 } 1870 1871 done: 1872 sc->pciide_channels[channel].dma_in_progress = 0; 1873 return (error); 1874 } 1875 1876 void 1877 pciide_irqack(struct channel_softc *chp) 1878 { 1879 struct pciide_channel *cp = (struct pciide_channel *)chp; 1880 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1881 int chan = chp->channel; 1882 1883 /* clear status bits in IDE DMA registers */ 1884 PCIIDE_DMACTL_WRITE(sc, chan, PCIIDE_DMACTL_READ(sc, chan)); 1885 } 1886 1887 /* some common code used by several chip_map */ 1888 int 1889 pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface) 1890 { 1891 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1892 sc->wdc_chanarray[channel] = &cp->wdc_channel; 1893 cp->name = PCIIDE_CHANNEL_NAME(channel); 1894 cp->wdc_channel.channel = channel; 1895 cp->wdc_channel.wdc = &sc->sc_wdcdev; 1896 cp->wdc_channel.ch_queue = 1897 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 1898 if (cp->wdc_channel.ch_queue == NULL) { 1899 printf("%s: %s " 1900 "cannot allocate memory for command queue", 1901 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1902 return (0); 1903 } 1904 cp->hw_ok = 1; 1905 1906 return (1); 1907 } 1908 1909 /* some common code used by several chip channel_map */ 1910 void 1911 pciide_mapchan(struct pci_attach_args *pa, struct pciide_channel *cp, 1912 pcireg_t interface, bus_size_t *cmdsizep, bus_size_t *ctlsizep, 1913 int (*pci_intr)(void *)) 1914 { 1915 struct channel_softc *wdc_cp = &cp->wdc_channel; 1916 1917 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 1918 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 1919 pci_intr); 1920 else 1921 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1922 wdc_cp->channel, cmdsizep, ctlsizep); 1923 if (cp->hw_ok == 0) 1924 return; 1925 wdc_cp->data32iot = wdc_cp->cmd_iot; 1926 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 1927 wdcattach(wdc_cp); 1928 } 1929 1930 /* 1931 * Generic code to call to know if a channel can be disabled. Return 1 1932 * if channel can be disabled, 0 if not 1933 */ 1934 int 1935 pciide_chan_candisable(struct pciide_channel *cp) 1936 { 1937 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1938 struct channel_softc *wdc_cp = &cp->wdc_channel; 1939 1940 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 1941 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 1942 printf("%s: %s disabled (no drives)\n", 1943 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1944 cp->hw_ok = 0; 1945 return (1); 1946 } 1947 return (0); 1948 } 1949 1950 /* 1951 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 
1952 * Set hw_ok=0 on failure 1953 */ 1954 void 1955 pciide_map_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 1956 int compatchan, int interface) 1957 { 1958 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1959 struct channel_softc *wdc_cp = &cp->wdc_channel; 1960 1961 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1962 return; 1963 1964 cp->compat = 1; 1965 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 1966 pa, compatchan, pciide_compat_intr, cp); 1967 if (cp->ih == NULL) { 1968 printf("%s: no compatibility interrupt for use by %s\n", 1969 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1970 cp->hw_ok = 0; 1971 } 1972 } 1973 1974 /* 1975 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel. 1976 * Set hw_ok=0 on failure 1977 */ 1978 void 1979 pciide_unmap_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 1980 int compatchan, int interface) 1981 { 1982 struct channel_softc *wdc_cp = &cp->wdc_channel; 1983 1984 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1985 return; 1986 1987 pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih); 1988 } 1989 1990 void 1991 pciide_print_channels(int nchannels, pcireg_t interface) 1992 { 1993 int i; 1994 1995 for (i = 0; i < nchannels; i++) { 1996 printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i), 1997 (interface & PCIIDE_INTERFACE_SETTABLE(i)) ? 1998 "configured" : "wired", 1999 (interface & PCIIDE_INTERFACE_PCI(i)) ? "native-PCI" : 2000 "compatibility"); 2001 } 2002 2003 printf("\n"); 2004 } 2005 2006 void 2007 pciide_print_modes(struct pciide_channel *cp) 2008 { 2009 wdc_print_current_modes(&cp->wdc_channel); 2010 } 2011 2012 void 2013 default_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2014 { 2015 struct pciide_channel *cp; 2016 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2017 pcireg_t csr; 2018 int channel, drive; 2019 struct ata_drive_datas *drvp; 2020 u_int8_t idedma_ctl; 2021 bus_size_t cmdsize, ctlsize; 2022 char *failreason; 2023 2024 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2025 printf(": DMA"); 2026 if (sc->sc_pp == &default_product_desc && 2027 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 2028 PCIIDE_OPTIONS_DMA) == 0) { 2029 printf(" (unsupported)"); 2030 sc->sc_dma_ok = 0; 2031 } else { 2032 pciide_mapreg_dma(sc, pa); 2033 if (sc->sc_dma_ok != 0) 2034 printf(", (partial support)"); 2035 } 2036 } else { 2037 printf(": no DMA"); 2038 sc->sc_dma_ok = 0; 2039 } 2040 if (sc->sc_dma_ok) { 2041 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2042 sc->sc_wdcdev.irqack = pciide_irqack; 2043 } 2044 sc->sc_wdcdev.PIO_cap = 0; 2045 sc->sc_wdcdev.DMA_cap = 0; 2046 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2047 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2048 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 2049 2050 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2051 2052 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2053 cp = &sc->pciide_channels[channel]; 2054 if (pciide_chansetup(sc, channel, interface) == 0) 2055 continue; 2056 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 2057 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2058 &ctlsize, pciide_pci_intr); 2059 } else { 2060 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2061 channel, &cmdsize, &ctlsize); 2062 } 2063 if (cp->hw_ok == 0) 2064 continue; 2065 /* 2066 * Check to see if something appears to be there. 
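 * (a quick wdcprobe() pass, followed by a re-probe with the controller's
 * PCI I/O space disabled to weed out responses from other hardware decoding
 * the same legacy addresses).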
2067 */ 2068 failreason = NULL; 2069 pciide_map_compat_intr(pa, cp, channel, interface); 2070 if (cp->hw_ok == 0) 2071 continue; 2072 if (!wdcprobe(&cp->wdc_channel)) { 2073 failreason = "not responding; disabled or no drives?"; 2074 goto next; 2075 } 2076 /* 2077 * Now, make sure it's actually attributable to this PCI IDE 2078 * channel by trying to access the channel again while the 2079 * PCI IDE controller's I/O space is disabled. (If the 2080 * channel no longer appears to be there, it belongs to 2081 * this controller.) YUCK! 2082 */ 2083 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 2084 PCI_COMMAND_STATUS_REG); 2085 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 2086 csr & ~PCI_COMMAND_IO_ENABLE); 2087 if (wdcprobe(&cp->wdc_channel)) 2088 failreason = "other hardware responding at addresses"; 2089 pci_conf_write(sc->sc_pc, sc->sc_tag, 2090 PCI_COMMAND_STATUS_REG, csr); 2091 next: 2092 if (failreason) { 2093 printf("%s: %s ignored (%s)\n", 2094 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2095 failreason); 2096 cp->hw_ok = 0; 2097 pciide_unmap_compat_intr(pa, cp, channel, interface); 2098 bus_space_unmap(cp->wdc_channel.cmd_iot, 2099 cp->wdc_channel.cmd_ioh, cmdsize); 2100 if (interface & PCIIDE_INTERFACE_PCI(channel)) 2101 bus_space_unmap(cp->wdc_channel.ctl_iot, 2102 cp->ctl_baseioh, ctlsize); 2103 else 2104 bus_space_unmap(cp->wdc_channel.ctl_iot, 2105 cp->wdc_channel.ctl_ioh, ctlsize); 2106 } 2107 if (cp->hw_ok) { 2108 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2109 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2110 wdcattach(&cp->wdc_channel); 2111 } 2112 } 2113 2114 if (sc->sc_dma_ok == 0) 2115 return; 2116 2117 /* Allocate DMA maps */ 2118 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2119 idedma_ctl = 0; 2120 cp = &sc->pciide_channels[channel]; 2121 for (drive = 0; drive < 2; drive++) { 2122 drvp = &cp->wdc_channel.ch_drive[drive]; 2123 /* If no drive, skip */ 2124 if ((drvp->drive_flags & DRIVE) == 0) 2125 continue; 2126 if ((drvp->drive_flags & DRIVE_DMA) == 0) 2127 continue; 2128 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 2129 /* Abort DMA setup */ 2130 printf("%s:%d:%d: cannot allocate DMA maps, " 2131 "using PIO transfers\n", 2132 sc->sc_wdcdev.sc_dev.dv_xname, 2133 channel, drive); 2134 drvp->drive_flags &= ~DRIVE_DMA; 2135 } 2136 printf("%s:%d:%d: using DMA data transfers\n", 2137 sc->sc_wdcdev.sc_dev.dv_xname, 2138 channel, drive); 2139 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2140 } 2141 if (idedma_ctl != 0) { 2142 /* Add software bits in status register */ 2143 PCIIDE_DMACTL_WRITE(sc, channel, idedma_ctl); 2144 } 2145 } 2146 } 2147 2148 void 2149 sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2150 { 2151 struct pciide_channel *cp; 2152 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2153 int channel; 2154 bus_size_t cmdsize, ctlsize; 2155 2156 if (interface == 0) { 2157 WDCDEBUG_PRINT(("sata_chip_map interface == 0\n"), 2158 DEBUG_PROBE); 2159 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2160 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 2161 } 2162 2163 printf(": DMA"); 2164 pciide_mapreg_dma(sc, pa); 2165 printf("\n"); 2166 2167 if (sc->sc_dma_ok) { 2168 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2169 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2170 sc->sc_wdcdev.irqack = pciide_irqack; 2171 } 2172 sc->sc_wdcdev.PIO_cap = 4; 2173 sc->sc_wdcdev.DMA_cap = 2; 2174 sc->sc_wdcdev.UDMA_cap = 6; 2175 2176 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2177 sc->sc_wdcdev.nchannels = 
PCIIDE_NUM_CHANNELS; 2178 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2179 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2180 sc->sc_wdcdev.set_modes = sata_setup_channel; 2181 2182 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2183 cp = &sc->pciide_channels[channel]; 2184 if (pciide_chansetup(sc, channel, interface) == 0) 2185 continue; 2186 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2187 pciide_pci_intr); 2188 sata_setup_channel(&cp->wdc_channel); 2189 } 2190 } 2191 2192 void 2193 sata_setup_channel(struct channel_softc *chp) 2194 { 2195 struct ata_drive_datas *drvp; 2196 int drive; 2197 u_int32_t idedma_ctl; 2198 struct pciide_channel *cp = (struct pciide_channel *)chp; 2199 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2200 2201 /* setup DMA if needed */ 2202 pciide_channel_dma_setup(cp); 2203 2204 idedma_ctl = 0; 2205 2206 for (drive = 0; drive < 2; drive++) { 2207 drvp = &chp->ch_drive[drive]; 2208 /* If no drive, skip */ 2209 if ((drvp->drive_flags & DRIVE) == 0) 2210 continue; 2211 if (drvp->drive_flags & DRIVE_UDMA) { 2212 /* use Ultra/DMA */ 2213 drvp->drive_flags &= ~DRIVE_DMA; 2214 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2215 } else if (drvp->drive_flags & DRIVE_DMA) { 2216 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2217 } 2218 } 2219 2220 /* 2221 * Nothing to do to setup modes; it is meaningless in S-ATA 2222 * (but many S-ATA drives still want to get the SET_FEATURE 2223 * command). 2224 */ 2225 if (idedma_ctl != 0) { 2226 /* Add software bits in status register */ 2227 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 2228 } 2229 pciide_print_modes(cp); 2230 } 2231 2232 void 2233 piix_timing_debug(struct pciide_softc *sc) 2234 { 2235 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 2236 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 2237 DEBUG_PROBE); 2238 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE && 2239 sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_ISA) { 2240 WDCDEBUG_PRINT((", sidetim=0x%x", 2241 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 2242 DEBUG_PROBE); 2243 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2244 WDCDEBUG_PRINT((", udmareg 0x%x", 2245 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 2246 DEBUG_PROBE); 2247 } 2248 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2249 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2250 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2251 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2252 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2253 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2254 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2255 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2256 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2257 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2258 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2259 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2260 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2261 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2262 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2263 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2264 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 2265 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 2266 DEBUG_PROBE); 2267 } 2268 } 2269 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 2270 } 2271 2272 void 2273 
piix_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2274 { 2275 struct pciide_channel *cp; 2276 int channel; 2277 u_int32_t idetim; 2278 bus_size_t cmdsize, ctlsize; 2279 2280 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2281 2282 printf(": DMA"); 2283 pciide_mapreg_dma(sc, pa); 2284 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2285 WDC_CAPABILITY_MODE; 2286 if (sc->sc_dma_ok) { 2287 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2288 sc->sc_wdcdev.irqack = pciide_irqack; 2289 switch (sc->sc_pp->ide_product) { 2290 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2291 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2292 case PCI_PRODUCT_INTEL_82371AB_IDE: 2293 case PCI_PRODUCT_INTEL_82372FB_IDE: 2294 case PCI_PRODUCT_INTEL_82440MX_IDE: 2295 case PCI_PRODUCT_INTEL_82451NX: 2296 case PCI_PRODUCT_INTEL_82801AA_IDE: 2297 case PCI_PRODUCT_INTEL_82801AB_IDE: 2298 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2299 case PCI_PRODUCT_INTEL_82801BA_IDE: 2300 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2301 case PCI_PRODUCT_INTEL_82801CA_IDE: 2302 case PCI_PRODUCT_INTEL_82801DB_IDE: 2303 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2304 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2305 case PCI_PRODUCT_INTEL_82801EB_IDE: 2306 case PCI_PRODUCT_INTEL_82801FB_IDE: 2307 case PCI_PRODUCT_INTEL_82801GB_IDE: 2308 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2309 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2310 break; 2311 } 2312 } 2313 sc->sc_wdcdev.PIO_cap = 4; 2314 sc->sc_wdcdev.DMA_cap = 2; 2315 switch (sc->sc_pp->ide_product) { 2316 case PCI_PRODUCT_INTEL_82801AA_IDE: 2317 case PCI_PRODUCT_INTEL_82372FB_IDE: 2318 sc->sc_wdcdev.UDMA_cap = 4; 2319 break; 2320 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2321 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2322 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2323 case PCI_PRODUCT_INTEL_82801BA_IDE: 2324 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2325 case PCI_PRODUCT_INTEL_82801CA_IDE: 2326 case PCI_PRODUCT_INTEL_82801DB_IDE: 2327 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2328 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2329 case PCI_PRODUCT_INTEL_82801EB_IDE: 2330 case PCI_PRODUCT_INTEL_82801FB_IDE: 2331 case PCI_PRODUCT_INTEL_82801GB_IDE: 2332 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2333 sc->sc_wdcdev.UDMA_cap = 5; 2334 break; 2335 default: 2336 sc->sc_wdcdev.UDMA_cap = 2; 2337 break; 2338 } 2339 2340 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE || 2341 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_ISA) { 2342 sc->sc_wdcdev.set_modes = piix_setup_channel; 2343 } else { 2344 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 2345 } 2346 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2347 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2348 2349 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2350 2351 piix_timing_debug(sc); 2352 2353 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2354 cp = &sc->pciide_channels[channel]; 2355 2356 /* PIIX is compat-only */ 2357 if (pciide_chansetup(sc, channel, 0) == 0) 2358 continue; 2359 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2360 if ((PIIX_IDETIM_READ(idetim, channel) & 2361 PIIX_IDETIM_IDE) == 0) { 2362 printf("%s: %s ignored (disabled)\n", 2363 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2364 continue; 2365 } 2366 /* PIIX are compat-only pciide devices */ 2367 pciide_map_compat_intr(pa, cp, channel, 0); 2368 if (cp->hw_ok == 0) 2369 continue; 2370 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr); 2371 if (cp->hw_ok == 0) 2372 goto next; 2373 if (pciide_chan_candisable(cp)) { 2374 idetim = 
PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 2375 channel); 2376 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 2377 idetim); 2378 } 2379 if (cp->hw_ok == 0) 2380 goto next; 2381 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2382 next: 2383 if (cp->hw_ok == 0) 2384 pciide_unmap_compat_intr(pa, cp, channel, 0); 2385 } 2386 2387 piix_timing_debug(sc); 2388 } 2389 2390 void 2391 piixsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2392 { 2393 struct pciide_channel *cp; 2394 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2395 int channel; 2396 bus_size_t cmdsize, ctlsize; 2397 u_int8_t reg, ich = 0; 2398 2399 printf(": DMA"); 2400 pciide_mapreg_dma(sc, pa); 2401 2402 if (sc->sc_dma_ok) { 2403 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2404 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2405 sc->sc_wdcdev.irqack = pciide_irqack; 2406 sc->sc_wdcdev.DMA_cap = 2; 2407 sc->sc_wdcdev.UDMA_cap = 6; 2408 } 2409 sc->sc_wdcdev.PIO_cap = 4; 2410 2411 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2412 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2413 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2414 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2415 sc->sc_wdcdev.set_modes = sata_setup_channel; 2416 2417 switch(sc->sc_pp->ide_product) { 2418 case PCI_PRODUCT_INTEL_6300ESB_SATA: 2419 case PCI_PRODUCT_INTEL_6300ESB_SATA2: 2420 case PCI_PRODUCT_INTEL_82801EB_SATA: 2421 case PCI_PRODUCT_INTEL_82801ER_SATA: 2422 ich = 5; 2423 break; 2424 case PCI_PRODUCT_INTEL_82801FB_SATA: 2425 case PCI_PRODUCT_INTEL_82801FR_SATA: 2426 case PCI_PRODUCT_INTEL_82801FBM_SATA: 2427 ich = 6; 2428 break; 2429 default: 2430 ich = 7; 2431 break; 2432 } 2433 2434 /* 2435 * Put the SATA portion of controllers that don't operate in combined 2436 * mode into native PCI modes so the maximum number of devices can be 2437 * used. 
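 * (in combined mode the SATA ports are funneled through the legacy
 * primary/secondary channels, so not all ports are reachable at once).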
Intel calls this "enhanced mode" 2438 */ 2439 if (ich == 5) { 2440 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP); 2441 if ((reg & ICH5_SATA_MAP_COMBINED) == 0) { 2442 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2443 ICH5_SATA_PI); 2444 reg |= ICH5_SATA_PI_PRI_NATIVE | 2445 ICH5_SATA_PI_SEC_NATIVE; 2446 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2447 ICH5_SATA_PI, reg); 2448 interface |= PCIIDE_INTERFACE_PCI(0) | 2449 PCIIDE_INTERFACE_PCI(1); 2450 } 2451 } else { 2452 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP) & 2453 ICH6_SATA_MAP_CMB_MASK; 2454 if (reg != ICH6_SATA_MAP_CMB_PRI && 2455 reg != ICH6_SATA_MAP_CMB_SEC) { 2456 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2457 ICH5_SATA_PI); 2458 reg |= ICH5_SATA_PI_PRI_NATIVE | 2459 ICH5_SATA_PI_SEC_NATIVE; 2460 2461 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2462 ICH5_SATA_PI, reg); 2463 interface |= PCIIDE_INTERFACE_PCI(0) | 2464 PCIIDE_INTERFACE_PCI(1); 2465 2466 /* 2467 * Ask for SATA IDE Mode, we don't need to do this 2468 * for the combined mode case as combined mode is 2469 * only allowed in IDE Mode 2470 */ 2471 if (ich >= 7) { 2472 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 2473 ICH5_SATA_MAP) & ~ICH7_SATA_MAP_SMS_MASK; 2474 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2475 ICH5_SATA_MAP, reg); 2476 } 2477 } 2478 } 2479 2480 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2481 2482 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2483 cp = &sc->pciide_channels[channel]; 2484 if (pciide_chansetup(sc, channel, interface) == 0) 2485 continue; 2486 2487 pciide_map_compat_intr(pa, cp, channel, interface); 2488 if (cp->hw_ok == 0) 2489 continue; 2490 2491 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2492 pciide_pci_intr); 2493 if (cp->hw_ok != 0) 2494 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2495 2496 if (cp->hw_ok == 0) 2497 pciide_unmap_compat_intr(pa, cp, channel, interface); 2498 } 2499 } 2500 2501 void 2502 piix_setup_channel(struct channel_softc *chp) 2503 { 2504 u_int8_t mode[2], drive; 2505 u_int32_t oidetim, idetim, idedma_ctl; 2506 struct pciide_channel *cp = (struct pciide_channel *)chp; 2507 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2508 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 2509 2510 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2511 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 2512 idedma_ctl = 0; 2513 2514 /* set up new idetim: Enable IDE registers decode */ 2515 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 2516 chp->channel); 2517 2518 /* setup DMA */ 2519 pciide_channel_dma_setup(cp); 2520 2521 /* 2522 * Here we have to mess up with drives mode: PIIX can't have 2523 * different timings for master and slave drives. 2524 * We need to find the best combination. 
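 * Roughly, the code below picks:
 *   - both drives DMA: both use the lower of the two DMA modes;
 *   - one drive DMA: it keeps its DMA mode, and the other keeps its PIO
 *     mode only if the ISP/RTC timings match, otherwise it drops to PIO 0;
 *   - no DMA at all: both use the lower PIO mode, except that a drive
 *     below PIO 2 is dropped to mode 0 while the other keeps its own mode.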
2525 */
2526
2527 /* If both drives support DMA, take the lower mode */
2528 if ((drvp[0].drive_flags & DRIVE_DMA) &&
2529 (drvp[1].drive_flags & DRIVE_DMA)) {
2530 mode[0] = mode[1] =
2531 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
2532 drvp[0].DMA_mode = mode[0];
2533 drvp[1].DMA_mode = mode[1];
2534 goto ok;
2535 }
2536 /*
2537 * If only one drive supports DMA, use its mode, and
2538 * put the other one in PIO mode 0 if the timings are not compatible
2539 */
2540 if (drvp[0].drive_flags & DRIVE_DMA) {
2541 mode[0] = drvp[0].DMA_mode;
2542 mode[1] = drvp[1].PIO_mode;
2543 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
2544 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
2545 mode[1] = drvp[1].PIO_mode = 0;
2546 goto ok;
2547 }
2548 if (drvp[1].drive_flags & DRIVE_DMA) {
2549 mode[1] = drvp[1].DMA_mode;
2550 mode[0] = drvp[0].PIO_mode;
2551 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
2552 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
2553 mode[0] = drvp[0].PIO_mode = 0;
2554 goto ok;
2555 }
2556 /*
2557 * If neither drive uses DMA, take the lower PIO mode, unless
2558 * one of them is below PIO mode 2
2559 */
2560 if (drvp[0].PIO_mode < 2) {
2561 mode[0] = drvp[0].PIO_mode = 0;
2562 mode[1] = drvp[1].PIO_mode;
2563 } else if (drvp[1].PIO_mode < 2) {
2564 mode[1] = drvp[1].PIO_mode = 0;
2565 mode[0] = drvp[0].PIO_mode;
2566 } else {
2567 mode[0] = mode[1] =
2568 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
2569 drvp[0].PIO_mode = mode[0];
2570 drvp[1].PIO_mode = mode[1];
2571 }
2572 ok: /* The modes are set up */
2573 for (drive = 0; drive < 2; drive++) {
2574 if (drvp[drive].drive_flags & DRIVE_DMA) {
2575 idetim |= piix_setup_idetim_timings(
2576 mode[drive], 1, chp->channel);
2577 goto end;
2578 }
2579 }
2580 /* If we get here, neither drive is using DMA */
2581 if (mode[0] >= 2)
2582 idetim |= piix_setup_idetim_timings(
2583 mode[0], 0, chp->channel);
2584 else
2585 idetim |= piix_setup_idetim_timings(
2586 mode[1], 0, chp->channel);
2587 end: /*
2588 * timing mode is now set up in the controller.
Enable 2589 * it per-drive 2590 */ 2591 for (drive = 0; drive < 2; drive++) { 2592 /* If no drive, skip */ 2593 if ((drvp[drive].drive_flags & DRIVE) == 0) 2594 continue; 2595 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 2596 if (drvp[drive].drive_flags & DRIVE_DMA) 2597 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2598 } 2599 if (idedma_ctl != 0) { 2600 /* Add software bits in status register */ 2601 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2602 IDEDMA_CTL(chp->channel), 2603 idedma_ctl); 2604 } 2605 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2606 pciide_print_modes(cp); 2607 } 2608 2609 void 2610 piix3_4_setup_channel(struct channel_softc *chp) 2611 { 2612 struct ata_drive_datas *drvp; 2613 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 2614 struct pciide_channel *cp = (struct pciide_channel *)chp; 2615 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2616 int drive; 2617 int channel = chp->channel; 2618 2619 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2620 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 2621 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 2622 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 2623 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 2624 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 2625 PIIX_SIDETIM_RTC_MASK(channel)); 2626 2627 idedma_ctl = 0; 2628 /* If channel disabled, no need to go further */ 2629 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 2630 return; 2631 /* set up new idetim: Enable IDE registers decode */ 2632 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 2633 2634 /* setup DMA if needed */ 2635 pciide_channel_dma_setup(cp); 2636 2637 for (drive = 0; drive < 2; drive++) { 2638 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 2639 PIIX_UDMATIM_SET(0x3, channel, drive)); 2640 drvp = &chp->ch_drive[drive]; 2641 /* If no drive, skip */ 2642 if ((drvp->drive_flags & DRIVE) == 0) 2643 continue; 2644 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2645 (drvp->drive_flags & DRIVE_UDMA) == 0)) 2646 goto pio; 2647 2648 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2649 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2650 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2651 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2652 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2653 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2654 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2655 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2656 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2657 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2658 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2659 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2660 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2661 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2662 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2663 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2664 ideconf |= PIIX_CONFIG_PINGPONG; 2665 } 2666 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2667 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2668 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2669 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 2670 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 2671 
sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2672 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2673 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2674 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2675 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2676 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2677 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2678 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) { 2679 /* setup Ultra/100 */ 2680 if (drvp->UDMA_mode > 2 && 2681 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2682 drvp->UDMA_mode = 2; 2683 if (drvp->UDMA_mode > 4) { 2684 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 2685 } else { 2686 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 2687 if (drvp->UDMA_mode > 2) { 2688 ideconf |= PIIX_CONFIG_UDMA66(channel, 2689 drive); 2690 } else { 2691 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 2692 drive); 2693 } 2694 } 2695 } 2696 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2697 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2698 /* setup Ultra/66 */ 2699 if (drvp->UDMA_mode > 2 && 2700 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2701 drvp->UDMA_mode = 2; 2702 if (drvp->UDMA_mode > 2) 2703 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 2704 else 2705 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 2706 } 2707 2708 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2709 (drvp->drive_flags & DRIVE_UDMA)) { 2710 /* use Ultra/DMA */ 2711 drvp->drive_flags &= ~DRIVE_DMA; 2712 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 2713 udmareg |= PIIX_UDMATIM_SET( 2714 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 2715 } else { 2716 /* use Multiword DMA */ 2717 drvp->drive_flags &= ~DRIVE_UDMA; 2718 if (drive == 0) { 2719 idetim |= piix_setup_idetim_timings( 2720 drvp->DMA_mode, 1, channel); 2721 } else { 2722 sidetim |= piix_setup_sidetim_timings( 2723 drvp->DMA_mode, 1, channel); 2724 idetim =PIIX_IDETIM_SET(idetim, 2725 PIIX_IDETIM_SITRE, channel); 2726 } 2727 } 2728 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2729 2730 pio: /* use PIO mode */ 2731 idetim |= piix_setup_idetim_drvs(drvp); 2732 if (drive == 0) { 2733 idetim |= piix_setup_idetim_timings( 2734 drvp->PIO_mode, 0, channel); 2735 } else { 2736 sidetim |= piix_setup_sidetim_timings( 2737 drvp->PIO_mode, 0, channel); 2738 idetim =PIIX_IDETIM_SET(idetim, 2739 PIIX_IDETIM_SITRE, channel); 2740 } 2741 } 2742 if (idedma_ctl != 0) { 2743 /* Add software bits in status register */ 2744 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2745 IDEDMA_CTL(channel), 2746 idedma_ctl); 2747 } 2748 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2749 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 2750 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 2751 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 2752 pciide_print_modes(cp); 2753 } 2754 2755 2756 /* setup ISP and RTC fields, based on mode */ 2757 u_int32_t 2758 piix_setup_idetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 2759 { 2760 2761 if (dma) 2762 return (PIIX_IDETIM_SET(0, 2763 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 2764 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 2765 channel)); 2766 else 2767 return (PIIX_IDETIM_SET(0, 2768 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 2769 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 2770 channel)); 2771 } 2772 2773 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 2774 u_int32_t 2775 piix_setup_idetim_drvs(struct 
ata_drive_datas *drvp)
2776 {
2777 u_int32_t ret = 0;
2778 struct channel_softc *chp = drvp->chnl_softc;
2779 u_int8_t channel = chp->channel;
2780 u_int8_t drive = drvp->drive;
2781
2782 /*
2783 * If the drive is using UDMA, the timing setup is independent,
2784 * so just check DMA and PIO here.
2785 */
2786 if (drvp->drive_flags & DRIVE_DMA) {
2787 /* if the drive is in DMA mode 0, use compatible timings */
2788 if ((drvp->drive_flags & DRIVE_DMA) &&
2789 drvp->DMA_mode == 0) {
2790 drvp->PIO_mode = 0;
2791 return (ret);
2792 }
2793 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2794 /*
2795 * If the PIO and DMA timings are the same, use fast timings
2796 * for PIO too, else use compat timings.
2797 */
2798 if ((piix_isp_pio[drvp->PIO_mode] !=
2799 piix_isp_dma[drvp->DMA_mode]) ||
2800 (piix_rtc_pio[drvp->PIO_mode] !=
2801 piix_rtc_dma[drvp->DMA_mode]))
2802 drvp->PIO_mode = 0;
2803 /* if PIO mode <= 2, use compat timings for PIO */
2804 if (drvp->PIO_mode <= 2) {
2805 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2806 channel);
2807 return (ret);
2808 }
2809 }
2810
2811 /*
2812 * Now set up PIO modes. If mode < 2, use compat timings.
2813 * Else enable fast timings. Enable IORDY and prefetch/post
2814 * if PIO mode >= 3.
2815 */
2816
2817 if (drvp->PIO_mode < 2)
2818 return (ret);
2819
2820 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2821 if (drvp->PIO_mode >= 3) {
2822 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2823 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2824 }
2825 return (ret);
2826 }
2827
2828 /* set up values in SIDETIM registers, based on mode */
2829 u_int32_t
2830 piix_setup_sidetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel)
2831 {
2832 if (dma)
2833 return (PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2834 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel));
2835 else
2836 return (PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2837 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel));
2838 }
2839
2840 void
2841 amd756_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
2842 {
2843 struct pciide_channel *cp;
2844 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2845 int channel;
2846 pcireg_t chanenable;
2847 bus_size_t cmdsize, ctlsize;
2848
2849 printf(": DMA");
2850 pciide_mapreg_dma(sc, pa);
2851 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2852 WDC_CAPABILITY_MODE;
2853 if (sc->sc_dma_ok) {
2854 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2855 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2856 sc->sc_wdcdev.irqack = pciide_irqack;
2857 }
2858 sc->sc_wdcdev.PIO_cap = 4;
2859 sc->sc_wdcdev.DMA_cap = 2;
2860 switch (sc->sc_pp->ide_product) {
2861 case PCI_PRODUCT_AMD_8111_IDE:
2862 sc->sc_wdcdev.UDMA_cap = 6;
2863 break;
2864 case PCI_PRODUCT_AMD_766_IDE:
2865 case PCI_PRODUCT_AMD_PBC768_IDE:
2866 sc->sc_wdcdev.UDMA_cap = 5;
2867 break;
2868 default:
2869 sc->sc_wdcdev.UDMA_cap = 4;
2870 break;
2871 }
2872 sc->sc_wdcdev.set_modes = amd756_setup_channel;
2873 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2874 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2875 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
2876
2877 pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2878
2879 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2880 cp = &sc->pciide_channels[channel];
2881 if (pciide_chansetup(sc, channel, interface) == 0)
2882 continue;
2883
2884 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
2885
printf("%s: %s ignored (disabled)\n", 2886 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2887 continue; 2888 } 2889 pciide_map_compat_intr(pa, cp, channel, interface); 2890 if (cp->hw_ok == 0) 2891 continue; 2892 2893 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2894 pciide_pci_intr); 2895 2896 if (pciide_chan_candisable(cp)) { 2897 chanenable &= ~AMD756_CHAN_EN(channel); 2898 } 2899 if (cp->hw_ok == 0) { 2900 pciide_unmap_compat_intr(pa, cp, channel, interface); 2901 continue; 2902 } 2903 2904 amd756_setup_channel(&cp->wdc_channel); 2905 } 2906 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 2907 chanenable); 2908 return; 2909 } 2910 2911 void 2912 amd756_setup_channel(struct channel_softc *chp) 2913 { 2914 u_int32_t udmatim_reg, datatim_reg; 2915 u_int8_t idedma_ctl; 2916 int mode, drive; 2917 struct ata_drive_datas *drvp; 2918 struct pciide_channel *cp = (struct pciide_channel *)chp; 2919 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2920 pcireg_t chanenable; 2921 #ifndef PCIIDE_AMD756_ENABLEDMA 2922 int product = sc->sc_pp->ide_product; 2923 int rev = sc->sc_rev; 2924 #endif 2925 2926 idedma_ctl = 0; 2927 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 2928 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 2929 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 2930 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 2931 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 2932 AMD756_CHANSTATUS_EN); 2933 2934 /* setup DMA if needed */ 2935 pciide_channel_dma_setup(cp); 2936 2937 for (drive = 0; drive < 2; drive++) { 2938 drvp = &chp->ch_drive[drive]; 2939 /* If no drive, skip */ 2940 if ((drvp->drive_flags & DRIVE) == 0) 2941 continue; 2942 /* add timing values, setup DMA if needed */ 2943 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2944 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2945 mode = drvp->PIO_mode; 2946 goto pio; 2947 } 2948 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2949 (drvp->drive_flags & DRIVE_UDMA)) { 2950 /* use Ultra/DMA */ 2951 drvp->drive_flags &= ~DRIVE_DMA; 2952 2953 /* Check cable */ 2954 if ((chanenable & AMD756_CABLE(chp->channel, 2955 drive)) == 0 && drvp->UDMA_mode > 2) { 2956 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 2957 "cable not detected\n", drvp->drive_name, 2958 sc->sc_wdcdev.sc_dev.dv_xname, 2959 chp->channel, drive), DEBUG_PROBE); 2960 drvp->UDMA_mode = 2; 2961 } 2962 2963 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 2964 AMD756_UDMA_EN_MTH(chp->channel, drive) | 2965 AMD756_UDMA_TIME(chp->channel, drive, 2966 amd756_udma_tim[drvp->UDMA_mode]); 2967 /* can use PIO timings, MW DMA unused */ 2968 mode = drvp->PIO_mode; 2969 } else { 2970 /* use Multiword DMA, but only if revision is OK */ 2971 drvp->drive_flags &= ~DRIVE_UDMA; 2972 #ifndef PCIIDE_AMD756_ENABLEDMA 2973 /* 2974 * The workaround doesn't seem to be necessary 2975 * with all drives, so it can be disabled by 2976 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 2977 * triggered. 
2978 */ 2979 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 2980 printf("%s:%d:%d: multi-word DMA disabled due " 2981 "to chip revision\n", 2982 sc->sc_wdcdev.sc_dev.dv_xname, 2983 chp->channel, drive); 2984 mode = drvp->PIO_mode; 2985 drvp->drive_flags &= ~DRIVE_DMA; 2986 goto pio; 2987 } 2988 #endif 2989 /* mode = min(pio, dma+2) */ 2990 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2991 mode = drvp->PIO_mode; 2992 else 2993 mode = drvp->DMA_mode + 2; 2994 } 2995 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2996 2997 pio: /* setup PIO mode */ 2998 if (mode <= 2) { 2999 drvp->DMA_mode = 0; 3000 drvp->PIO_mode = 0; 3001 mode = 0; 3002 } else { 3003 drvp->PIO_mode = mode; 3004 drvp->DMA_mode = mode - 2; 3005 } 3006 datatim_reg |= 3007 AMD756_DATATIM_PULSE(chp->channel, drive, 3008 amd756_pio_set[mode]) | 3009 AMD756_DATATIM_RECOV(chp->channel, drive, 3010 amd756_pio_rec[mode]); 3011 } 3012 if (idedma_ctl != 0) { 3013 /* Add software bits in status register */ 3014 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3015 IDEDMA_CTL(chp->channel), 3016 idedma_ctl); 3017 } 3018 pciide_print_modes(cp); 3019 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 3020 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 3021 } 3022 3023 void 3024 apollo_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3025 { 3026 struct pciide_channel *cp; 3027 pcireg_t interface; 3028 int channel; 3029 u_int32_t ideconf; 3030 bus_size_t cmdsize, ctlsize; 3031 pcitag_t tag; 3032 pcireg_t id, class; 3033 3034 /* 3035 * Fake interface since VT6410 is claimed to be a ``RAID'' device. 3036 */ 3037 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3038 interface = PCI_INTERFACE(pa->pa_class); 3039 } else { 3040 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3041 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3042 } 3043 3044 if ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT6410) || 3045 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_CX700_IDE) || 3046 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VX700_IDE)) { 3047 printf(": ATA133"); 3048 sc->sc_wdcdev.UDMA_cap = 6; 3049 } else { 3050 /* 3051 * Determine the DMA capabilities by looking at the 3052 * ISA bridge. 3053 */ 3054 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 3055 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3056 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3057 3058 /* 3059 * XXX On the VT8237, the ISA bridge is on a different 3060 * device. 
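 * (when the IDE function is at device 15 and no bridge is found there,
 * the lookup below retries at device 17, function 0.)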
3061 */ 3062 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE && 3063 pa->pa_device == 15) { 3064 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 17, 0); 3065 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3066 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3067 } 3068 3069 switch (PCI_PRODUCT(id)) { 3070 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 3071 if (PCI_REVISION(class) >= 0x02) { 3072 printf(": ATA33"); 3073 sc->sc_wdcdev.UDMA_cap = 2; 3074 } else { 3075 printf(": DMA"); 3076 sc->sc_wdcdev.UDMA_cap = 0; 3077 } 3078 break; 3079 case PCI_PRODUCT_VIATECH_VT82C596A: 3080 if (PCI_REVISION(class) >= 0x12) { 3081 printf(": ATA66"); 3082 sc->sc_wdcdev.UDMA_cap = 4; 3083 } else { 3084 printf(": ATA33"); 3085 sc->sc_wdcdev.UDMA_cap = 2; 3086 } 3087 break; 3088 3089 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 3090 if (PCI_REVISION(class) >= 0x40) { 3091 printf(": ATA100"); 3092 sc->sc_wdcdev.UDMA_cap = 5; 3093 } else { 3094 printf(": ATA66"); 3095 sc->sc_wdcdev.UDMA_cap = 4; 3096 } 3097 break; 3098 case PCI_PRODUCT_VIATECH_VT8231_ISA: 3099 case PCI_PRODUCT_VIATECH_VT8233_ISA: 3100 printf(": ATA100"); 3101 sc->sc_wdcdev.UDMA_cap = 5; 3102 break; 3103 case PCI_PRODUCT_VIATECH_VT8233A_ISA: 3104 case PCI_PRODUCT_VIATECH_VT8235_ISA: 3105 case PCI_PRODUCT_VIATECH_VT8237_ISA: 3106 printf(": ATA133"); 3107 sc->sc_wdcdev.UDMA_cap = 6; 3108 break; 3109 default: 3110 printf(": DMA"); 3111 sc->sc_wdcdev.UDMA_cap = 0; 3112 break; 3113 } 3114 } 3115 3116 pciide_mapreg_dma(sc, pa); 3117 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3118 WDC_CAPABILITY_MODE; 3119 if (sc->sc_dma_ok) { 3120 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3121 sc->sc_wdcdev.irqack = pciide_irqack; 3122 if (sc->sc_wdcdev.UDMA_cap > 0) 3123 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3124 } 3125 sc->sc_wdcdev.PIO_cap = 4; 3126 sc->sc_wdcdev.DMA_cap = 2; 3127 sc->sc_wdcdev.set_modes = apollo_setup_channel; 3128 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3129 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3130 3131 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3132 3133 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 3134 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3135 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 3136 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 3137 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3138 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 3139 DEBUG_PROBE); 3140 3141 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3142 cp = &sc->pciide_channels[channel]; 3143 if (pciide_chansetup(sc, channel, interface) == 0) 3144 continue; 3145 3146 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 3147 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 3148 printf("%s: %s ignored (disabled)\n", 3149 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3150 continue; 3151 } 3152 pciide_map_compat_intr(pa, cp, channel, interface); 3153 if (cp->hw_ok == 0) 3154 continue; 3155 3156 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3157 pciide_pci_intr); 3158 if (cp->hw_ok == 0) { 3159 goto next; 3160 } 3161 if (pciide_chan_candisable(cp)) { 3162 ideconf &= ~APO_IDECONF_EN(channel); 3163 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 3164 ideconf); 3165 } 3166 3167 if (cp->hw_ok == 0) 3168 goto next; 3169 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 3170 next: 3171 if (cp->hw_ok == 0) 3172 pciide_unmap_compat_intr(pa, cp, channel, interface); 3173 } 3174 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, 
APO_UDMA=0x%x\n", 3175 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3176 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 3177 } 3178 3179 void 3180 apollo_setup_channel(struct channel_softc *chp) 3181 { 3182 u_int32_t udmatim_reg, datatim_reg; 3183 u_int8_t idedma_ctl; 3184 int mode, drive; 3185 struct ata_drive_datas *drvp; 3186 struct pciide_channel *cp = (struct pciide_channel *)chp; 3187 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3188 3189 idedma_ctl = 0; 3190 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 3191 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 3192 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 3193 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 3194 3195 /* setup DMA if needed */ 3196 pciide_channel_dma_setup(cp); 3197 3198 /* 3199 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 3200 * downgrade to Ultra/33 if needed 3201 */ 3202 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3203 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 3204 /* both drives UDMA */ 3205 if (chp->ch_drive[0].UDMA_mode > 2 && 3206 chp->ch_drive[1].UDMA_mode <= 2) { 3207 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 3208 chp->ch_drive[0].UDMA_mode = 2; 3209 } else if (chp->ch_drive[1].UDMA_mode > 2 && 3210 chp->ch_drive[0].UDMA_mode <= 2) { 3211 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 3212 chp->ch_drive[1].UDMA_mode = 2; 3213 } 3214 } 3215 3216 for (drive = 0; drive < 2; drive++) { 3217 drvp = &chp->ch_drive[drive]; 3218 /* If no drive, skip */ 3219 if ((drvp->drive_flags & DRIVE) == 0) 3220 continue; 3221 /* add timing values, setup DMA if needed */ 3222 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3223 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3224 mode = drvp->PIO_mode; 3225 goto pio; 3226 } 3227 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3228 (drvp->drive_flags & DRIVE_UDMA)) { 3229 /* use Ultra/DMA */ 3230 drvp->drive_flags &= ~DRIVE_DMA; 3231 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 3232 APO_UDMA_EN_MTH(chp->channel, drive); 3233 if (sc->sc_wdcdev.UDMA_cap == 6) { 3234 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3235 drive, apollo_udma133_tim[drvp->UDMA_mode]); 3236 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 3237 /* 686b */ 3238 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3239 drive, apollo_udma100_tim[drvp->UDMA_mode]); 3240 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 3241 /* 596b or 686a */ 3242 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 3243 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3244 drive, apollo_udma66_tim[drvp->UDMA_mode]); 3245 } else { 3246 /* 596a or 586b */ 3247 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3248 drive, apollo_udma33_tim[drvp->UDMA_mode]); 3249 } 3250 /* can use PIO timings, MW DMA unused */ 3251 mode = drvp->PIO_mode; 3252 } else { 3253 /* use Multiword DMA */ 3254 drvp->drive_flags &= ~DRIVE_UDMA; 3255 /* mode = min(pio, dma+2) */ 3256 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3257 mode = drvp->PIO_mode; 3258 else 3259 mode = drvp->DMA_mode + 2; 3260 } 3261 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3262 3263 pio: /* setup PIO mode */ 3264 if (mode <= 2) { 3265 drvp->DMA_mode = 0; 3266 drvp->PIO_mode = 0; 3267 mode = 0; 3268 } else { 3269 drvp->PIO_mode = mode; 3270 drvp->DMA_mode = mode - 2; 3271 } 3272 datatim_reg |= 3273 APO_DATATIM_PULSE(chp->channel, drive, 3274 apollo_pio_set[mode]) | 3275 APO_DATATIM_RECOV(chp->channel, drive, 3276 apollo_pio_rec[mode]); 3277 } 3278 if (idedma_ctl != 0) { 3279 /* Add software bits in status register */ 3280 
bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3281 IDEDMA_CTL(chp->channel),
3282 idedma_ctl);
3283 }
3284 pciide_print_modes(cp);
3285 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
3286 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
3287 }
3288
3289 void
3290 cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc,
3291 int channel)
3292 {
3293 struct pciide_channel *cp = &sc->pciide_channels[channel];
3294 bus_size_t cmdsize, ctlsize;
3295 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
3296 pcireg_t interface;
3297 int one_channel;
3298
3299 /*
3300 * The 0648/0649 can be told to identify as a RAID controller.
3301 * In this case, we have to fake the interface.
3302 */
3303 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
3304 interface = PCIIDE_INTERFACE_SETTABLE(0) |
3305 PCIIDE_INTERFACE_SETTABLE(1);
3306 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
3307 CMD_CONF_DSA1)
3308 interface |= PCIIDE_INTERFACE_PCI(0) |
3309 PCIIDE_INTERFACE_PCI(1);
3310 } else {
3311 interface = PCI_INTERFACE(pa->pa_class);
3312 }
3313
3314 sc->wdc_chanarray[channel] = &cp->wdc_channel;
3315 cp->name = PCIIDE_CHANNEL_NAME(channel);
3316 cp->wdc_channel.channel = channel;
3317 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3318
3319 /*
3320 * Older CMD64X chips don't have independent channels
3321 */
3322 switch (sc->sc_pp->ide_product) {
3323 case PCI_PRODUCT_CMDTECH_649:
3324 one_channel = 0;
3325 break;
3326 default:
3327 one_channel = 1;
3328 break;
3329 }
3330
3331 if (channel > 0 && one_channel) {
3332 cp->wdc_channel.ch_queue =
3333 sc->pciide_channels[0].wdc_channel.ch_queue;
3334 } else {
3335 cp->wdc_channel.ch_queue =
3336 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3337 }
3338 if (cp->wdc_channel.ch_queue == NULL) {
3339 printf(
3340 "%s: %s cannot allocate memory for command queue",
3341 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3342 return;
3343 }
3344
3345 /*
3346 * With a CMD PCI64x, if we get here, the first channel is enabled:
3347 * there's no way to disable the first channel without disabling
3348 * the whole device.
3349 */
3350 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
3351 printf("%s: %s ignored (disabled)\n",
3352 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3353 return;
3354 }
3355 cp->hw_ok = 1;
3356 pciide_map_compat_intr(pa, cp, channel, interface);
3357 if (cp->hw_ok == 0)
3358 return;
3359 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
3360 if (cp->hw_ok == 0) {
3361 pciide_unmap_compat_intr(pa, cp, channel, interface);
3362 return;
3363 }
3364 if (pciide_chan_candisable(cp)) {
3365 if (channel == 1) {
3366 ctrl &= ~CMD_CTRL_2PORT;
3367 pciide_pci_write(pa->pa_pc, pa->pa_tag,
3368 CMD_CTRL, ctrl);
3369 pciide_unmap_compat_intr(pa, cp, channel, interface);
3370 }
3371 }
3372 }
3373
3374 int
3375 cmd_pci_intr(void *arg)
3376 {
3377 struct pciide_softc *sc = arg;
3378 struct pciide_channel *cp;
3379 struct channel_softc *wdc_cp;
3380 int i, rv, crv;
3381 u_int32_t priirq, secirq;
3382
3383 rv = 0;
3384 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
3385 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
3386 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3387 cp = &sc->pciide_channels[i];
3388 wdc_cp = &cp->wdc_channel;
3389 /* If it's a compat channel, skip.
*/ 3390 if (cp->compat) 3391 continue; 3392 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 3393 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 3394 crv = wdcintr(wdc_cp); 3395 if (crv == 0) { 3396 #if 0 3397 printf("%s:%d: bogus intr\n", 3398 sc->sc_wdcdev.sc_dev.dv_xname, i); 3399 #endif 3400 } else 3401 rv = 1; 3402 } 3403 } 3404 return (rv); 3405 } 3406 3407 void 3408 cmd_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3409 { 3410 int channel; 3411 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3412 3413 printf(": no DMA"); 3414 sc->sc_dma_ok = 0; 3415 3416 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3417 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3418 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3419 3420 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3421 3422 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3423 cmd_channel_map(pa, sc, channel); 3424 } 3425 } 3426 3427 void 3428 cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3429 { 3430 struct pciide_channel *cp; 3431 int channel; 3432 int rev = sc->sc_rev; 3433 pcireg_t interface; 3434 3435 /* 3436 * The 0648/0649 can be told to identify as a RAID controller. 3437 * In this case, we have to fake interface 3438 */ 3439 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3440 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3441 PCIIDE_INTERFACE_SETTABLE(1); 3442 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3443 CMD_CONF_DSA1) 3444 interface |= PCIIDE_INTERFACE_PCI(0) | 3445 PCIIDE_INTERFACE_PCI(1); 3446 } else { 3447 interface = PCI_INTERFACE(pa->pa_class); 3448 } 3449 3450 printf(": DMA"); 3451 pciide_mapreg_dma(sc, pa); 3452 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3453 WDC_CAPABILITY_MODE; 3454 if (sc->sc_dma_ok) { 3455 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3456 switch (sc->sc_pp->ide_product) { 3457 case PCI_PRODUCT_CMDTECH_649: 3458 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3459 sc->sc_wdcdev.UDMA_cap = 5; 3460 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3461 break; 3462 case PCI_PRODUCT_CMDTECH_648: 3463 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3464 sc->sc_wdcdev.UDMA_cap = 4; 3465 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3466 break; 3467 case PCI_PRODUCT_CMDTECH_646: 3468 if (rev >= CMD0646U2_REV) { 3469 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3470 sc->sc_wdcdev.UDMA_cap = 2; 3471 } else if (rev >= CMD0646U_REV) { 3472 /* 3473 * Linux's driver claims that the 646U is broken 3474 * with UDMA. 
Only enable it if we know what we're 3475 * doing 3476 */ 3477 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 3478 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3479 sc->sc_wdcdev.UDMA_cap = 2; 3480 #endif 3481 /* explicitly disable UDMA */ 3482 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3483 CMD_UDMATIM(0), 0); 3484 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3485 CMD_UDMATIM(1), 0); 3486 } 3487 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3488 break; 3489 default: 3490 sc->sc_wdcdev.irqack = pciide_irqack; 3491 } 3492 } 3493 3494 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3495 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3496 sc->sc_wdcdev.PIO_cap = 4; 3497 sc->sc_wdcdev.DMA_cap = 2; 3498 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 3499 3500 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3501 3502 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 3503 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3504 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3505 DEBUG_PROBE); 3506 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3507 cp = &sc->pciide_channels[channel]; 3508 cmd_channel_map(pa, sc, channel); 3509 if (cp->hw_ok == 0) 3510 continue; 3511 cmd0643_9_setup_channel(&cp->wdc_channel); 3512 } 3513 /* 3514 * note - this also makes sure we clear the irq disable and reset 3515 * bits 3516 */ 3517 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 3518 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 3519 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3520 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3521 DEBUG_PROBE); 3522 } 3523 3524 void 3525 cmd0643_9_setup_channel(struct channel_softc *chp) 3526 { 3527 struct ata_drive_datas *drvp; 3528 u_int8_t tim; 3529 u_int32_t idedma_ctl, udma_reg; 3530 int drive; 3531 struct pciide_channel *cp = (struct pciide_channel *)chp; 3532 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3533 3534 idedma_ctl = 0; 3535 /* setup DMA if needed */ 3536 pciide_channel_dma_setup(cp); 3537 3538 for (drive = 0; drive < 2; drive++) { 3539 drvp = &chp->ch_drive[drive]; 3540 /* If no drive, skip */ 3541 if ((drvp->drive_flags & DRIVE) == 0) 3542 continue; 3543 /* add timing values, setup DMA if needed */ 3544 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 3545 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 3546 if (drvp->drive_flags & DRIVE_UDMA) { 3547 /* UltraDMA on a 646U2, 0648 or 0649 */ 3548 drvp->drive_flags &= ~DRIVE_DMA; 3549 udma_reg = pciide_pci_read(sc->sc_pc, 3550 sc->sc_tag, CMD_UDMATIM(chp->channel)); 3551 if (drvp->UDMA_mode > 2 && 3552 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3553 CMD_BICSR) & 3554 CMD_BICSR_80(chp->channel)) == 0) { 3555 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 3556 "80-wire cable not detected\n", 3557 drvp->drive_name, 3558 sc->sc_wdcdev.sc_dev.dv_xname, 3559 chp->channel, drive), DEBUG_PROBE); 3560 drvp->UDMA_mode = 2; 3561 } 3562 if (drvp->UDMA_mode > 2) 3563 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 3564 else if (sc->sc_wdcdev.UDMA_cap > 2) 3565 udma_reg |= CMD_UDMATIM_UDMA33(drive); 3566 udma_reg |= CMD_UDMATIM_UDMA(drive); 3567 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 3568 CMD_UDMATIM_TIM_OFF(drive)); 3569 udma_reg |= 3570 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 3571 CMD_UDMATIM_TIM_OFF(drive)); 3572 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3573 CMD_UDMATIM(chp->channel), udma_reg); 3574 } else { 3575 /* 3576 * use Multiword DMA. 
3577 * Timings will be used for both PIO and DMA, 3578 * so adjust DMA mode if needed 3579 * if we have a 0646U2/8/9, turn off UDMA 3580 */ 3581 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 3582 udma_reg = pciide_pci_read(sc->sc_pc, 3583 sc->sc_tag, 3584 CMD_UDMATIM(chp->channel)); 3585 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 3586 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3587 CMD_UDMATIM(chp->channel), 3588 udma_reg); 3589 } 3590 if (drvp->PIO_mode >= 3 && 3591 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3592 drvp->DMA_mode = drvp->PIO_mode - 2; 3593 } 3594 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 3595 } 3596 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3597 } 3598 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3599 CMD_DATA_TIM(chp->channel, drive), tim); 3600 } 3601 if (idedma_ctl != 0) { 3602 /* Add software bits in status register */ 3603 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3604 IDEDMA_CTL(chp->channel), 3605 idedma_ctl); 3606 } 3607 pciide_print_modes(cp); 3608 #ifdef __sparc64__ 3609 /* 3610 * The Ultra 5 has a tendency to hang during reboot. This is due 3611 * to the PCI0646U asserting a PCI interrupt line when the chip 3612 * registers claim that it is not. Performing a reset at this 3613 * point appears to eliminate the symptoms. It is likely the 3614 * real cause is still lurking somewhere in the code. 3615 */ 3616 wdcreset(chp, SILENT); 3617 #endif /* __sparc64__ */ 3618 } 3619 3620 void 3621 cmd646_9_irqack(struct channel_softc *chp) 3622 { 3623 u_int32_t priirq, secirq; 3624 struct pciide_channel *cp = (struct pciide_channel *)chp; 3625 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3626 3627 if (chp->channel == 0) { 3628 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3629 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 3630 } else { 3631 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3632 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 3633 } 3634 pciide_irqack(chp); 3635 } 3636 3637 void 3638 cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3639 { 3640 struct pciide_channel *cp; 3641 int channel; 3642 3643 printf("\n%s: bus-master DMA support present", 3644 sc->sc_wdcdev.sc_dev.dv_xname); 3645 pciide_mapreg_dma(sc, pa); 3646 printf("\n"); 3647 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3648 WDC_CAPABILITY_MODE; 3649 if (sc->sc_dma_ok) { 3650 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3651 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3652 sc->sc_wdcdev.UDMA_cap = 6; 3653 sc->sc_wdcdev.irqack = pciide_irqack; 3654 } 3655 3656 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3657 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3658 sc->sc_wdcdev.PIO_cap = 4; 3659 sc->sc_wdcdev.DMA_cap = 2; 3660 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 3661 3662 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 3663 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 3664 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 3665 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 3666 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3667 cp = &sc->pciide_channels[channel]; 3668 cmd680_channel_map(pa, sc, channel); 3669 if (cp->hw_ok == 0) 3670 continue; 3671 cmd680_setup_channel(&cp->wdc_channel); 3672 } 3673 } 3674 3675 void 3676 cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3677 int channel) 3678 { 3679 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3680 bus_size_t cmdsize, ctlsize; 3681 int 
interface, i, reg; 3682 static const u_int8_t init_val[] = 3683 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 3684 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 3685 3686 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3687 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3688 PCIIDE_INTERFACE_SETTABLE(1); 3689 interface |= PCIIDE_INTERFACE_PCI(0) | 3690 PCIIDE_INTERFACE_PCI(1); 3691 } else { 3692 interface = PCI_INTERFACE(pa->pa_class); 3693 } 3694 3695 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3696 cp->name = PCIIDE_CHANNEL_NAME(channel); 3697 cp->wdc_channel.channel = channel; 3698 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3699 3700 cp->wdc_channel.ch_queue = 3701 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 3702 if (cp->wdc_channel.ch_queue == NULL) { 3703 printf("%s %s: " 3704 "can't allocate memory for command queue", 3705 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3706 return; 3707 } 3708 3709 /* XXX */ 3710 reg = 0xa2 + channel * 16; 3711 for (i = 0; i < sizeof(init_val); i++) 3712 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 3713 3714 printf("%s: %s %s to %s mode\n", 3715 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 3716 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 3717 "configured" : "wired", 3718 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 3719 "native-PCI" : "compatibility"); 3720 3721 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 3722 if (cp->hw_ok == 0) 3723 return; 3724 pciide_map_compat_intr(pa, cp, channel, interface); 3725 } 3726 3727 void 3728 cmd680_setup_channel(struct channel_softc *chp) 3729 { 3730 struct ata_drive_datas *drvp; 3731 u_int8_t mode, off, scsc; 3732 u_int16_t val; 3733 u_int32_t idedma_ctl; 3734 int drive; 3735 struct pciide_channel *cp = (struct pciide_channel *)chp; 3736 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3737 pci_chipset_tag_t pc = sc->sc_pc; 3738 pcitag_t pa = sc->sc_tag; 3739 static const u_int8_t udma2_tbl[] = 3740 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 3741 static const u_int8_t udma_tbl[] = 3742 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 3743 static const u_int16_t dma_tbl[] = 3744 { 0x2208, 0x10c2, 0x10c1 }; 3745 static const u_int16_t pio_tbl[] = 3746 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 3747 3748 idedma_ctl = 0; 3749 pciide_channel_dma_setup(cp); 3750 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 3751 3752 for (drive = 0; drive < 2; drive++) { 3753 drvp = &chp->ch_drive[drive]; 3754 /* If no drive, skip */ 3755 if ((drvp->drive_flags & DRIVE) == 0) 3756 continue; 3757 mode &= ~(0x03 << (drive * 4)); 3758 if (drvp->drive_flags & DRIVE_UDMA) { 3759 drvp->drive_flags &= ~DRIVE_DMA; 3760 off = 0xa0 + chp->channel * 16; 3761 if (drvp->UDMA_mode > 2 && 3762 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 3763 drvp->UDMA_mode = 2; 3764 scsc = pciide_pci_read(pc, pa, 0x8a); 3765 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 3766 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 3767 scsc = pciide_pci_read(pc, pa, 0x8a); 3768 if ((scsc & 0x30) == 0) 3769 drvp->UDMA_mode = 5; 3770 } 3771 mode |= 0x03 << (drive * 4); 3772 off = 0xac + chp->channel * 16 + drive * 2; 3773 val = pciide_pci_read(pc, pa, off) & ~0x3f; 3774 if (scsc & 0x30) 3775 val |= udma2_tbl[drvp->UDMA_mode]; 3776 else 3777 val |= udma_tbl[drvp->UDMA_mode]; 3778 pciide_pci_write(pc, pa, off, val); 3779 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3780 } else if (drvp->drive_flags & DRIVE_DMA) { 3781 mode |= 0x02 << (drive * 4); 3782 off = 0xa8 + chp->channel * 16 + 
drive * 2;
3783 val = dma_tbl[drvp->DMA_mode];
3784 pciide_pci_write(pc, pa, off, val & 0xff);
3785 pciide_pci_write(pc, pa, off + 1, val >> 8);
3786 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3787 } else {
3788 mode |= 0x01 << (drive * 4);
3789 off = 0xa4 + chp->channel * 16 + drive * 2;
3790 val = pio_tbl[drvp->PIO_mode];
3791 pciide_pci_write(pc, pa, off, val & 0xff);
3792 pciide_pci_write(pc, pa, off + 1, val >> 8);
3793 }
3794 }
3795
3796 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3797 if (idedma_ctl != 0) {
3798 /* Add software bits in status register */
3799 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3800 IDEDMA_CTL(chp->channel),
3801 idedma_ctl);
3802 }
3803 pciide_print_modes(cp);
3804 }
3805
3806 /*
3807 * When the Silicon Image 3112 retries a PCI memory read command,
3808 * it may retry it as a memory read multiple command under some
3809 * circumstances. This can totally confuse some PCI controllers,
3810 * so ensure that it will never do this by making sure that the
3811 * Read Threshold (FIFO Read Request Control) fields of the FIFO
3812 * Valid Byte Count and Control registers for both channels (BA5
3813 * offsets 0x40 and 0x44) are set to be at least as large as the
3814 * cacheline size register.
3815 */
3816 void
3817 sii_fixup_cacheline(struct pciide_softc *sc, struct pci_attach_args *pa)
3818 {
3819 pcireg_t cls, reg40, reg44;
3820
3821 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
3822 cls = (cls >> PCI_CACHELINE_SHIFT) & PCI_CACHELINE_MASK;
3823 cls *= 4;
3824 if (cls > 224) {
3825 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
3826 cls &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
3827 cls |= ((224/4) << PCI_CACHELINE_SHIFT);
3828 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, cls);
3829 cls = 224;
3830 }
3831 if (cls < 32)
3832 cls = 32;
3833 cls = (cls + 31) / 32;
3834 reg40 = ba5_read_4(sc, 0x40);
3835 reg44 = ba5_read_4(sc, 0x44);
3836 if ((reg40 & 0x7) < cls)
3837 ba5_write_4(sc, 0x40, (reg40 & ~0x07) | cls);
3838 if ((reg44 & 0x7) < cls)
3839 ba5_write_4(sc, 0x44, (reg44 & ~0x07) | cls);
3840 }
3841
3842 void
3843 sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
3844 {
3845 struct pciide_channel *cp;
3846 bus_size_t cmdsize, ctlsize;
3847 pcireg_t interface, scs_cmd, cfgctl;
3848 int channel;
3849 struct pciide_satalink *sl = sc->sc_cookie;
3850
3851 /* Allocate memory for private data */
3852 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO);
3853 sl = sc->sc_cookie;
3854
3855 #define SII3112_RESET_BITS \
3856 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \
3857 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \
3858 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET)
3859
3860 /*
3861 * Reset everything and then unblock all of the interrupts.
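 * The sequence below asserts the PBM, arbiter, FIFO and IDE reset
 * bits, waits 50ms, then clears them again while preserving only the
 * BA5 enable bit from the original SCS_CMD value.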
3862 */ 3863 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 3864 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 3865 scs_cmd | SII3112_RESET_BITS); 3866 delay(50 * 1000); 3867 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 3868 scs_cmd & SCS_CMD_BA5_EN); 3869 delay(50 * 1000); 3870 3871 if (scs_cmd & SCS_CMD_BA5_EN) { 3872 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 3873 PCI_MAPREG_TYPE_MEM | 3874 PCI_MAPREG_MEM_TYPE_32BIT, 0, 3875 &sl->ba5_st, &sl->ba5_sh, 3876 NULL, NULL, 0) != 0) 3877 printf(": unable to map BA5 register space\n"); 3878 else 3879 sl->ba5_en = 1; 3880 } else { 3881 cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 3882 SII3112_PCI_CFGCTL); 3883 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL, 3884 cfgctl | CFGCTL_BA5INDEN); 3885 } 3886 3887 printf(": DMA"); 3888 pciide_mapreg_dma(sc, pa); 3889 printf("\n"); 3890 3891 /* 3892 * Rev. <= 0x01 of the 3112 have a bug that can cause data 3893 * corruption if DMA transfers cross an 8K boundary. This is 3894 * apparently hard to tickle, but we'll go ahead and play it 3895 * safe. 3896 */ 3897 if (sc->sc_rev <= 0x01) { 3898 sc->sc_dma_maxsegsz = 8192; 3899 sc->sc_dma_boundary = 8192; 3900 } 3901 3902 sii_fixup_cacheline(sc, pa); 3903 3904 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 3905 sc->sc_wdcdev.PIO_cap = 4; 3906 if (sc->sc_dma_ok) { 3907 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3908 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3909 sc->sc_wdcdev.irqack = pciide_irqack; 3910 sc->sc_wdcdev.DMA_cap = 2; 3911 sc->sc_wdcdev.UDMA_cap = 6; 3912 } 3913 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 3914 3915 /* We can use SControl and SStatus to probe for drives. */ 3916 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 3917 3918 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3919 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3920 3921 /* 3922 * The 3112 either identifies itself as a RAID storage device 3923 * or a Misc storage device. Fake up the interface bits for 3924 * what our driver expects. 
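 * The faked value advertises bus-master DMA capability and
 * native-PCI mode on both channels.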
3925 */ 3926 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3927 interface = PCI_INTERFACE(pa->pa_class); 3928 } else { 3929 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3930 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3931 } 3932 3933 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3934 cp = &sc->pciide_channels[channel]; 3935 if (pciide_chansetup(sc, channel, interface) == 0) 3936 continue; 3937 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3938 pciide_pci_intr); 3939 if (cp->hw_ok == 0) 3940 continue; 3941 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 3942 } 3943 } 3944 3945 void 3946 sii3112_setup_channel(struct channel_softc *chp) 3947 { 3948 struct ata_drive_datas *drvp; 3949 int drive; 3950 u_int32_t idedma_ctl, dtm; 3951 struct pciide_channel *cp = (struct pciide_channel *)chp; 3952 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3953 3954 /* setup DMA if needed */ 3955 pciide_channel_dma_setup(cp); 3956 3957 idedma_ctl = 0; 3958 dtm = 0; 3959 3960 for (drive = 0; drive < 2; drive++) { 3961 drvp = &chp->ch_drive[drive]; 3962 /* If no drive, skip */ 3963 if ((drvp->drive_flags & DRIVE) == 0) 3964 continue; 3965 if (drvp->drive_flags & DRIVE_UDMA) { 3966 /* use Ultra/DMA */ 3967 drvp->drive_flags &= ~DRIVE_DMA; 3968 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3969 dtm |= DTM_IDEx_DMA; 3970 } else if (drvp->drive_flags & DRIVE_DMA) { 3971 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3972 dtm |= DTM_IDEx_DMA; 3973 } else { 3974 dtm |= DTM_IDEx_PIO; 3975 } 3976 } 3977 3978 /* 3979 * Nothing to do to setup modes; it is meaningless in S-ATA 3980 * (but many S-ATA drives still want to get the SET_FEATURE 3981 * command). 3982 */ 3983 if (idedma_ctl != 0) { 3984 /* Add software bits in status register */ 3985 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 3986 } 3987 BA5_WRITE_4(sc, chp->channel, ba5_IDE_DTM, dtm); 3988 pciide_print_modes(cp); 3989 } 3990 3991 void 3992 sii3112_drv_probe(struct channel_softc *chp) 3993 { 3994 struct pciide_channel *cp = (struct pciide_channel *)chp; 3995 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3996 uint32_t scontrol, sstatus; 3997 uint8_t scnt, sn, cl, ch; 3998 int i, s; 3999 4000 /* XXX This should be done by other code. */ 4001 for (i = 0; i < 2; i++) { 4002 chp->ch_drive[i].chnl_softc = chp; 4003 chp->ch_drive[i].drive = i; 4004 } 4005 4006 /* 4007 * The 3112 is a 2-port part, and only has one drive per channel 4008 * (each port emulates a master drive). 4009 * 4010 * The 3114 is similar, but has 4 channels. 4011 */ 4012 4013 /* 4014 * Request communication initialization sequence, any speed. 4015 * Performing this is the equivalent of an ATA Reset. 4016 */ 4017 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 4018 4019 /* 4020 * XXX We don't yet support SATA power management; disable all 4021 * power management state transitions. 4022 */ 4023 scontrol |= SControl_IPM_NONE; 4024 4025 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4026 delay(50 * 1000); 4027 scontrol &= ~SControl_DET_INIT; 4028 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4029 delay(50 * 1000); 4030 4031 sstatus = BA5_READ_4(sc, chp->channel, ba5_SStatus); 4032 #if 0 4033 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 4034 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 4035 BA5_READ_4(sc, chp->channel, ba5_SControl)); 4036 #endif 4037 switch (sstatus & SStatus_DET_mask) { 4038 case SStatus_DET_NODEV: 4039 /* No device; be silent. 
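(SStatus reports no device present and no PHY communication established.)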
*/ 4040 break; 4041 4042 case SStatus_DET_DEV_NE: 4043 printf("%s: port %d: device connected, but " 4044 "communication not established\n", 4045 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4046 break; 4047 4048 case SStatus_DET_OFFLINE: 4049 printf("%s: port %d: PHY offline\n", 4050 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4051 break; 4052 4053 case SStatus_DET_DEV: 4054 /* 4055 * XXX ATAPI detection doesn't currently work. Don't 4056 * XXX know why. But, it's not like the standard method 4057 * XXX can detect an ATAPI device connected via a SATA/PATA 4058 * XXX bridge, so at least this is no worse. --thorpej 4059 */ 4060 if (chp->_vtbl != NULL) 4061 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 4062 else 4063 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 4064 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 4065 delay(10); /* 400ns delay */ 4066 /* Save register contents. */ 4067 if (chp->_vtbl != NULL) { 4068 scnt = CHP_READ_REG(chp, wdr_seccnt); 4069 sn = CHP_READ_REG(chp, wdr_sector); 4070 cl = CHP_READ_REG(chp, wdr_cyl_lo); 4071 ch = CHP_READ_REG(chp, wdr_cyl_hi); 4072 } else { 4073 scnt = bus_space_read_1(chp->cmd_iot, 4074 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 4075 sn = bus_space_read_1(chp->cmd_iot, 4076 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 4077 cl = bus_space_read_1(chp->cmd_iot, 4078 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 4079 ch = bus_space_read_1(chp->cmd_iot, 4080 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 4081 } 4082 #if 0 4083 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 4084 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 4085 scnt, sn, cl, ch); 4086 #endif 4087 /* 4088 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 4089 * cases we get wrong values here, so ignore it. 4090 */ 4091 s = splbio(); 4092 if (cl == 0x14 && ch == 0xeb) 4093 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 4094 else 4095 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 4096 splx(s); 4097 4098 printf("%s: port %d: device present", 4099 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4100 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 4101 case 1: 4102 printf(", speed: 1.5Gb/s"); 4103 break; 4104 case 2: 4105 printf(", speed: 3.0Gb/s"); 4106 break; 4107 } 4108 printf("\n"); 4109 break; 4110 4111 default: 4112 printf("%s: port %d: unknown SStatus: 0x%08x\n", 4113 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 4114 } 4115 } 4116 4117 void 4118 sii3114_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4119 { 4120 struct pciide_channel *cp; 4121 pcireg_t scs_cmd; 4122 pci_intr_handle_t intrhandle; 4123 const char *intrstr; 4124 int channel; 4125 struct pciide_satalink *sl = sc->sc_cookie; 4126 4127 /* Allocate memory for private data */ 4128 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 4129 sl = sc->sc_cookie; 4130 4131 #define SII3114_RESET_BITS \ 4132 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4133 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4134 SCS_CMD_FF3_RESET | SCS_CMD_FF2_RESET | \ 4135 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET | \ 4136 SCS_CMD_IDE3_RESET | SCS_CMD_IDE2_RESET) 4137 4138 /* 4139 * Reset everything and then unblock all of the interrupts. 
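 * Same dance as on the 3112, except that the 3114 has four FIFOs and
 * four IDE channels to reset, and only the M66EN bit is preserved
 * when the reset bits are cleared again.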
4140 */ 4141 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4142 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4143 scs_cmd | SII3114_RESET_BITS); 4144 delay(50 * 1000); 4145 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4146 scs_cmd & SCS_CMD_M66EN); 4147 delay(50 * 1000); 4148 4149 /* 4150 * On the 3114, the BA5 register space is always enabled. In 4151 * order to use the 3114 in any sane way, we must use this BA5 4152 * register space, and so we consider it an error if we cannot 4153 * map it. 4154 * 4155 * As a consequence of using BA5, our register mapping is different 4156 * from a normal PCI IDE controller's, and so we are unable to use 4157 * most of the common PCI IDE register mapping functions. 4158 */ 4159 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4160 PCI_MAPREG_TYPE_MEM | 4161 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4162 &sl->ba5_st, &sl->ba5_sh, 4163 NULL, NULL, 0) != 0) { 4164 printf(": unable to map BA5 register space\n"); 4165 return; 4166 } 4167 sl->ba5_en = 1; 4168 4169 /* 4170 * Set the Interrupt Steering bit in the IDEDMA_CMD register of 4171 * channel 2. This is required at all times for proper operation 4172 * when using the BA5 register space (otherwise interrupts from 4173 * all 4 channels won't work). 4174 */ 4175 BA5_WRITE_4(sc, 2, ba5_IDEDMA_CMD, IDEDMA_CMD_INT_STEER); 4176 4177 printf(": DMA"); 4178 sii3114_mapreg_dma(sc, pa); 4179 printf("\n"); 4180 4181 sii_fixup_cacheline(sc, pa); 4182 4183 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4184 sc->sc_wdcdev.PIO_cap = 4; 4185 if (sc->sc_dma_ok) { 4186 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4187 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4188 sc->sc_wdcdev.irqack = pciide_irqack; 4189 sc->sc_wdcdev.DMA_cap = 2; 4190 sc->sc_wdcdev.UDMA_cap = 6; 4191 } 4192 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4193 4194 /* We can use SControl and SStatus to probe for drives. */ 4195 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4196 4197 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4198 sc->sc_wdcdev.nchannels = 4; 4199 4200 /* Map and establish the interrupt handler. */ 4201 if (pci_intr_map(pa, &intrhandle) != 0) { 4202 printf("%s: couldn't map native-PCI interrupt\n", 4203 sc->sc_wdcdev.sc_dev.dv_xname); 4204 return; 4205 } 4206 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 4207 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 4208 /* XXX */ 4209 pciide_pci_intr, sc, 4210 sc->sc_wdcdev.sc_dev.dv_xname); 4211 if (sc->sc_pci_ih != NULL) { 4212 printf("%s: using %s for native-PCI interrupt\n", 4213 sc->sc_wdcdev.sc_dev.dv_xname, 4214 intrstr ? 
intrstr : "unknown interrupt"); 4215 } else { 4216 printf("%s: couldn't establish native-PCI interrupt", 4217 sc->sc_wdcdev.sc_dev.dv_xname); 4218 if (intrstr != NULL) 4219 printf(" at %s", intrstr); 4220 printf("\n"); 4221 return; 4222 } 4223 4224 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4225 cp = &sc->pciide_channels[channel]; 4226 if (sii3114_chansetup(sc, channel) == 0) 4227 continue; 4228 sii3114_mapchan(cp); 4229 if (cp->hw_ok == 0) 4230 continue; 4231 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4232 } 4233 } 4234 4235 void 4236 sii3114_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 4237 { 4238 int chan, reg; 4239 bus_size_t size; 4240 struct pciide_satalink *sl = sc->sc_cookie; 4241 4242 sc->sc_wdcdev.dma_arg = sc; 4243 sc->sc_wdcdev.dma_init = pciide_dma_init; 4244 sc->sc_wdcdev.dma_start = pciide_dma_start; 4245 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 4246 4247 /* 4248 * Slice off a subregion of BA5 for each of the channel's DMA 4249 * registers. 4250 */ 4251 4252 sc->sc_dma_iot = sl->ba5_st; 4253 for (chan = 0; chan < 4; chan++) { 4254 for (reg = 0; reg < IDEDMA_NREGS; reg++) { 4255 size = 4; 4256 if (size > (IDEDMA_SCH_OFFSET - reg)) 4257 size = IDEDMA_SCH_OFFSET - reg; 4258 if (bus_space_subregion(sl->ba5_st, 4259 sl->ba5_sh, 4260 satalink_ba5_regmap[chan].ba5_IDEDMA_CMD + reg, 4261 size, &sl->regs[chan].dma_iohs[reg]) != 0) { 4262 sc->sc_dma_ok = 0; 4263 printf(": can't subregion offset " 4264 "%lu size %lu", 4265 (u_long) satalink_ba5_regmap[ 4266 chan].ba5_IDEDMA_CMD + reg, 4267 (u_long) size); 4268 return; 4269 } 4270 } 4271 } 4272 4273 sc->sc_dmacmd_read = sii3114_dmacmd_read; 4274 sc->sc_dmacmd_write = sii3114_dmacmd_write; 4275 sc->sc_dmactl_read = sii3114_dmactl_read; 4276 sc->sc_dmactl_write = sii3114_dmactl_write; 4277 sc->sc_dmatbl_write = sii3114_dmatbl_write; 4278 4279 /* DMA registers all set up! */ 4280 sc->sc_dmat = pa->pa_dmat; 4281 sc->sc_dma_ok = 1; 4282 } 4283 4284 int 4285 sii3114_chansetup(struct pciide_softc *sc, int channel) 4286 { 4287 static const char *channel_names[] = { 4288 "port 0", 4289 "port 1", 4290 "port 2", 4291 "port 3", 4292 }; 4293 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4294 4295 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4296 4297 /* 4298 * We must always keep the Interrupt Steering bit set in channel 2's 4299 * IDEDMA_CMD register. 
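 * The bit is remembered in cp->idedma_cmd below so it can be ORed
 * back in whenever that channel's IDEDMA_CMD register is rewritten.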
4300 */ 4301 if (channel == 2) 4302 cp->idedma_cmd = IDEDMA_CMD_INT_STEER; 4303 4304 cp->name = channel_names[channel]; 4305 cp->wdc_channel.channel = channel; 4306 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4307 cp->wdc_channel.ch_queue = 4308 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 4309 if (cp->wdc_channel.ch_queue == NULL) { 4310 printf("%s %s channel: " 4311 "can't allocate memory for command queue", 4312 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4313 return (0); 4314 } 4315 return (1); 4316 } 4317 4318 void 4319 sii3114_mapchan(struct pciide_channel *cp) 4320 { 4321 struct channel_softc *wdc_cp = &cp->wdc_channel; 4322 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4323 struct pciide_satalink *sl = sc->sc_cookie; 4324 int chan = wdc_cp->channel; 4325 int i; 4326 4327 cp->hw_ok = 0; 4328 cp->compat = 0; 4329 cp->ih = sc->sc_pci_ih; 4330 4331 sl->regs[chan].cmd_iot = sl->ba5_st; 4332 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4333 satalink_ba5_regmap[chan].ba5_IDE_TF0, 4334 9, &sl->regs[chan].cmd_baseioh) != 0) { 4335 printf("%s: couldn't subregion %s cmd base\n", 4336 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4337 return; 4338 } 4339 4340 sl->regs[chan].ctl_iot = sl->ba5_st; 4341 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4342 satalink_ba5_regmap[chan].ba5_IDE_TF8, 4343 1, &cp->ctl_baseioh) != 0) { 4344 printf("%s: couldn't subregion %s ctl base\n", 4345 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4346 return; 4347 } 4348 sl->regs[chan].ctl_ioh = cp->ctl_baseioh; 4349 4350 for (i = 0; i < WDC_NREG; i++) { 4351 if (bus_space_subregion(sl->regs[chan].cmd_iot, 4352 sl->regs[chan].cmd_baseioh, 4353 i, i == 0 ? 4 : 1, 4354 &sl->regs[chan].cmd_iohs[i]) != 0) { 4355 printf("%s: couldn't subregion %s channel " 4356 "cmd regs\n", 4357 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4358 return; 4359 } 4360 } 4361 sl->regs[chan].cmd_iohs[wdr_status & _WDC_REGMASK] = 4362 sl->regs[chan].cmd_iohs[wdr_command & _WDC_REGMASK]; 4363 sl->regs[chan].cmd_iohs[wdr_features & _WDC_REGMASK] = 4364 sl->regs[chan].cmd_iohs[wdr_error & _WDC_REGMASK]; 4365 wdc_cp->data32iot = wdc_cp->cmd_iot = sl->regs[chan].cmd_iot; 4366 wdc_cp->data32ioh = wdc_cp->cmd_ioh = sl->regs[chan].cmd_iohs[0]; 4367 wdc_cp->_vtbl = &wdc_sii3114_vtbl; 4368 wdcattach(wdc_cp); 4369 cp->hw_ok = 1; 4370 } 4371 4372 u_int8_t 4373 sii3114_read_reg(struct channel_softc *chp, enum wdc_regs reg) 4374 { 4375 struct pciide_channel *cp = (struct pciide_channel *)chp; 4376 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4377 struct pciide_satalink *sl = sc->sc_cookie; 4378 4379 if (reg & _WDC_AUX) 4380 return (bus_space_read_1(sl->regs[chp->channel].ctl_iot, 4381 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 4382 else 4383 return (bus_space_read_1(sl->regs[chp->channel].cmd_iot, 4384 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0)); 4385 } 4386 4387 void 4388 sii3114_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 4389 { 4390 struct pciide_channel *cp = (struct pciide_channel *)chp; 4391 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4392 struct pciide_satalink *sl = sc->sc_cookie; 4393 4394 if (reg & _WDC_AUX) 4395 bus_space_write_1(sl->regs[chp->channel].ctl_iot, 4396 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 4397 else 4398 bus_space_write_1(sl->regs[chp->channel].cmd_iot, 4399 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 4400 0, val); 4401 } 4402 4403 u_int8_t 4404 sii3114_dmacmd_read(struct 
pciide_softc *sc, int chan) 4405 { 4406 struct pciide_satalink *sl = sc->sc_cookie; 4407 4408 return (bus_space_read_1(sc->sc_dma_iot, 4409 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0)); 4410 } 4411 4412 void 4413 sii3114_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 4414 { 4415 struct pciide_satalink *sl = sc->sc_cookie; 4416 4417 bus_space_write_1(sc->sc_dma_iot, 4418 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, val); 4419 } 4420 4421 u_int8_t 4422 sii3114_dmactl_read(struct pciide_softc *sc, int chan) 4423 { 4424 struct pciide_satalink *sl = sc->sc_cookie; 4425 4426 return (bus_space_read_1(sc->sc_dma_iot, 4427 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0)); 4428 } 4429 4430 void 4431 sii3114_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 4432 { 4433 struct pciide_satalink *sl = sc->sc_cookie; 4434 4435 bus_space_write_1(sc->sc_dma_iot, 4436 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0, val); 4437 } 4438 4439 void 4440 sii3114_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 4441 { 4442 struct pciide_satalink *sl = sc->sc_cookie; 4443 4444 bus_space_write_4(sc->sc_dma_iot, 4445 sl->regs[chan].dma_iohs[IDEDMA_TBL(0)], 0, val); 4446 } 4447 4448 void 4449 cy693_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4450 { 4451 struct pciide_channel *cp; 4452 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4453 bus_size_t cmdsize, ctlsize; 4454 struct pciide_cy *cy; 4455 4456 /* Allocate memory for private data */ 4457 sc->sc_cookie = malloc(sizeof(*cy), M_DEVBUF, M_NOWAIT | M_ZERO); 4458 cy = sc->sc_cookie; 4459 4460 /* 4461 * this chip has 2 PCI IDE functions, one for primary and one for 4462 * secondary. So we need to call pciide_mapregs_compat() with 4463 * the real channel 4464 */ 4465 if (pa->pa_function == 1) { 4466 cy->cy_compatchan = 0; 4467 } else if (pa->pa_function == 2) { 4468 cy->cy_compatchan = 1; 4469 } else { 4470 printf(": unexpected PCI function %d\n", pa->pa_function); 4471 return; 4472 } 4473 4474 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 4475 printf(": DMA"); 4476 pciide_mapreg_dma(sc, pa); 4477 } else { 4478 printf(": no DMA"); 4479 sc->sc_dma_ok = 0; 4480 } 4481 4482 cy->cy_handle = cy82c693_init(pa->pa_iot); 4483 if (cy->cy_handle == NULL) { 4484 printf(", (unable to map ctl registers)"); 4485 sc->sc_dma_ok = 0; 4486 } 4487 4488 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4489 WDC_CAPABILITY_MODE; 4490 if (sc->sc_dma_ok) { 4491 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4492 sc->sc_wdcdev.irqack = pciide_irqack; 4493 } 4494 sc->sc_wdcdev.PIO_cap = 4; 4495 sc->sc_wdcdev.DMA_cap = 2; 4496 sc->sc_wdcdev.set_modes = cy693_setup_channel; 4497 4498 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4499 sc->sc_wdcdev.nchannels = 1; 4500 4501 /* Only one channel for this chip; if we are here it's enabled */ 4502 cp = &sc->pciide_channels[0]; 4503 sc->wdc_chanarray[0] = &cp->wdc_channel; 4504 cp->name = PCIIDE_CHANNEL_NAME(0); 4505 cp->wdc_channel.channel = 0; 4506 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4507 cp->wdc_channel.ch_queue = 4508 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 4509 if (cp->wdc_channel.ch_queue == NULL) { 4510 printf(": cannot allocate memory for command queue\n"); 4511 return; 4512 } 4513 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 4514 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 
4515 "configured" : "wired"); 4516 if (interface & PCIIDE_INTERFACE_PCI(0)) { 4517 printf("native-PCI\n"); 4518 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 4519 pciide_pci_intr); 4520 } else { 4521 printf("compatibility\n"); 4522 cp->hw_ok = pciide_mapregs_compat(pa, cp, cy->cy_compatchan, 4523 &cmdsize, &ctlsize); 4524 } 4525 4526 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4527 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4528 pciide_map_compat_intr(pa, cp, cy->cy_compatchan, interface); 4529 if (cp->hw_ok == 0) 4530 return; 4531 wdcattach(&cp->wdc_channel); 4532 if (pciide_chan_candisable(cp)) { 4533 pci_conf_write(sc->sc_pc, sc->sc_tag, 4534 PCI_COMMAND_STATUS_REG, 0); 4535 } 4536 if (cp->hw_ok == 0) { 4537 pciide_unmap_compat_intr(pa, cp, cy->cy_compatchan, 4538 interface); 4539 return; 4540 } 4541 4542 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 4543 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4544 cy693_setup_channel(&cp->wdc_channel); 4545 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 4546 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4547 } 4548 4549 void 4550 cy693_setup_channel(struct channel_softc *chp) 4551 { 4552 struct ata_drive_datas *drvp; 4553 int drive; 4554 u_int32_t cy_cmd_ctrl; 4555 u_int32_t idedma_ctl; 4556 struct pciide_channel *cp = (struct pciide_channel *)chp; 4557 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4558 int dma_mode = -1; 4559 struct pciide_cy *cy = sc->sc_cookie; 4560 4561 cy_cmd_ctrl = idedma_ctl = 0; 4562 4563 /* setup DMA if needed */ 4564 pciide_channel_dma_setup(cp); 4565 4566 for (drive = 0; drive < 2; drive++) { 4567 drvp = &chp->ch_drive[drive]; 4568 /* If no drive, skip */ 4569 if ((drvp->drive_flags & DRIVE) == 0) 4570 continue; 4571 /* add timing values, setup DMA if needed */ 4572 if (drvp->drive_flags & DRIVE_DMA) { 4573 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4574 /* use Multiword DMA */ 4575 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 4576 dma_mode = drvp->DMA_mode; 4577 } 4578 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4579 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 4580 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4581 CY_CMD_CTRL_IOW_REC_OFF(drive)); 4582 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4583 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 4584 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4585 CY_CMD_CTRL_IOR_REC_OFF(drive)); 4586 } 4587 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 4588 chp->ch_drive[0].DMA_mode = dma_mode; 4589 chp->ch_drive[1].DMA_mode = dma_mode; 4590 4591 if (dma_mode == -1) 4592 dma_mode = 0; 4593 4594 if (cy->cy_handle != NULL) { 4595 /* Note: `multiple' is implied. */ 4596 cy82c693_write(cy->cy_handle, 4597 (cy->cy_compatchan == 0) ? 
4598 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 4599 } 4600 4601 pciide_print_modes(cp); 4602 4603 if (idedma_ctl != 0) { 4604 /* Add software bits in status register */ 4605 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4606 IDEDMA_CTL(chp->channel), idedma_ctl); 4607 } 4608 } 4609 4610 static struct sis_hostbr_type { 4611 u_int16_t id; 4612 u_int8_t rev; 4613 u_int8_t udma_mode; 4614 char *name; 4615 u_int8_t type; 4616 #define SIS_TYPE_NOUDMA 0 4617 #define SIS_TYPE_66 1 4618 #define SIS_TYPE_100OLD 2 4619 #define SIS_TYPE_100NEW 3 4620 #define SIS_TYPE_133OLD 4 4621 #define SIS_TYPE_133NEW 5 4622 #define SIS_TYPE_SOUTH 6 4623 } sis_hostbr_type[] = { 4624 /* Most infos here are from sos@freebsd.org */ 4625 {PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66}, 4626 #if 0 4627 /* 4628 * controllers associated to a rev 0x2 530 Host to PCI Bridge 4629 * have problems with UDMA (info provided by Christos) 4630 */ 4631 {PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA}, 4632 #endif 4633 {PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66}, 4634 {PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66}, 4635 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66}, 4636 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66}, 4637 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW}, 4638 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW}, 4639 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW}, 4640 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH}, 4641 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH}, 4642 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH}, 4643 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH}, 4644 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH}, 4645 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH}, 4646 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH}, 4647 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH}, 4648 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH}, 4649 {PCI_PRODUCT_SIS_661, 0x00, 6, "661", SIS_TYPE_SOUTH}, 4650 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD}, 4651 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW}, 4652 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW}, 4653 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH}, 4654 {PCI_PRODUCT_SIS_741, 0x00, 6, "741", SIS_TYPE_SOUTH}, 4655 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW}, 4656 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH}, 4657 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH}, 4658 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH}, 4659 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH}, 4660 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH}, 4661 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH}, 4662 {PCI_PRODUCT_SIS_760, 0x00, 6, "760", SIS_TYPE_SOUTH}, 4663 /* 4664 * From sos@freebsd.org: the 0x961 ID will never be found in real world 4665 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW}, 4666 */ 4667 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW}, 4668 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW}, 4669 {PCI_PRODUCT_SIS_964, 0x00, 6, "964", SIS_TYPE_133NEW}, 4670 {PCI_PRODUCT_SIS_965, 0x00, 6, "965", SIS_TYPE_133NEW} 4671 }; 4672 4673 static struct sis_hostbr_type *sis_hostbr_type_match; 4674 4675 int 4676 sis_hostbr_match(struct pci_attach_args *pa) 4677 { 4678 int i; 4679 4680 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS) 4681 return (0); 4682 sis_hostbr_type_match = NULL; 4683 for (i = 0; 4684 i < sizeof(sis_hostbr_type) / 
sizeof(sis_hostbr_type[0]); 4685 i++) { 4686 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id && 4687 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev) 4688 sis_hostbr_type_match = &sis_hostbr_type[i]; 4689 } 4690 return (sis_hostbr_type_match != NULL); 4691 } 4692 4693 int 4694 sis_south_match(struct pci_attach_args *pa) 4695 { 4696 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS && 4697 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 && 4698 PCI_REVISION(pa->pa_class) >= 0x10); 4699 } 4700 4701 void 4702 sis_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4703 { 4704 struct pciide_channel *cp; 4705 int channel; 4706 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 4707 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4708 int rev = sc->sc_rev; 4709 bus_size_t cmdsize, ctlsize; 4710 struct pciide_sis *sis; 4711 4712 /* Allocate memory for private data */ 4713 sc->sc_cookie = malloc(sizeof(*sis), M_DEVBUF, M_NOWAIT | M_ZERO); 4714 sis = sc->sc_cookie; 4715 4716 pci_find_device(NULL, sis_hostbr_match); 4717 4718 if (sis_hostbr_type_match) { 4719 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) { 4720 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57, 4721 pciide_pci_read(sc->sc_pc, sc->sc_tag, 4722 SIS_REG_57) & 0x7f); 4723 if (sc->sc_pp->ide_product == SIS_PRODUCT_5518) { 4724 sis->sis_type = SIS_TYPE_133NEW; 4725 sc->sc_wdcdev.UDMA_cap = 4726 sis_hostbr_type_match->udma_mode; 4727 } else { 4728 if (pci_find_device(NULL, sis_south_match)) { 4729 sis->sis_type = SIS_TYPE_133OLD; 4730 sc->sc_wdcdev.UDMA_cap = 4731 sis_hostbr_type_match->udma_mode; 4732 } else { 4733 sis->sis_type = SIS_TYPE_100NEW; 4734 sc->sc_wdcdev.UDMA_cap = 4735 sis_hostbr_type_match->udma_mode; 4736 } 4737 } 4738 } else { 4739 sis->sis_type = sis_hostbr_type_match->type; 4740 sc->sc_wdcdev.UDMA_cap = 4741 sis_hostbr_type_match->udma_mode; 4742 } 4743 printf(": %s", sis_hostbr_type_match->name); 4744 } else { 4745 printf(": 5597/5598"); 4746 if (rev >= 0xd0) { 4747 sc->sc_wdcdev.UDMA_cap = 2; 4748 sis->sis_type = SIS_TYPE_66; 4749 } else { 4750 sc->sc_wdcdev.UDMA_cap = 0; 4751 sis->sis_type = SIS_TYPE_NOUDMA; 4752 } 4753 } 4754 4755 printf(": DMA"); 4756 pciide_mapreg_dma(sc, pa); 4757 4758 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4759 WDC_CAPABILITY_MODE; 4760 if (sc->sc_dma_ok) { 4761 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4762 sc->sc_wdcdev.irqack = pciide_irqack; 4763 if (sis->sis_type >= SIS_TYPE_66) 4764 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 4765 } 4766 4767 sc->sc_wdcdev.PIO_cap = 4; 4768 sc->sc_wdcdev.DMA_cap = 2; 4769 4770 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4771 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4772 switch (sis->sis_type) { 4773 case SIS_TYPE_NOUDMA: 4774 case SIS_TYPE_66: 4775 case SIS_TYPE_100OLD: 4776 sc->sc_wdcdev.set_modes = sis_setup_channel; 4777 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 4778 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 4779 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC); 4780 break; 4781 case SIS_TYPE_100NEW: 4782 case SIS_TYPE_133OLD: 4783 sc->sc_wdcdev.set_modes = sis_setup_channel; 4784 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49, 4785 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01); 4786 break; 4787 case SIS_TYPE_133NEW: 4788 sc->sc_wdcdev.set_modes = sis96x_setup_channel; 4789 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 4790 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 4791 
pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 4792 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 4793 break; 4794 } 4795 4796 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 4797 4798 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4799 cp = &sc->pciide_channels[channel]; 4800 if (pciide_chansetup(sc, channel, interface) == 0) 4801 continue; 4802 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 4803 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 4804 printf("%s: %s ignored (disabled)\n", 4805 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4806 continue; 4807 } 4808 pciide_map_compat_intr(pa, cp, channel, interface); 4809 if (cp->hw_ok == 0) 4810 continue; 4811 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4812 pciide_pci_intr); 4813 if (cp->hw_ok == 0) { 4814 pciide_unmap_compat_intr(pa, cp, channel, interface); 4815 continue; 4816 } 4817 if (pciide_chan_candisable(cp)) { 4818 if (channel == 0) 4819 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 4820 else 4821 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 4822 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 4823 sis_ctr0); 4824 } 4825 if (cp->hw_ok == 0) { 4826 pciide_unmap_compat_intr(pa, cp, channel, interface); 4827 continue; 4828 } 4829 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4830 } 4831 } 4832 4833 void 4834 sis96x_setup_channel(struct channel_softc *chp) 4835 { 4836 struct ata_drive_datas *drvp; 4837 int drive; 4838 u_int32_t sis_tim; 4839 u_int32_t idedma_ctl; 4840 int regtim; 4841 struct pciide_channel *cp = (struct pciide_channel *)chp; 4842 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4843 4844 sis_tim = 0; 4845 idedma_ctl = 0; 4846 /* setup DMA if needed */ 4847 pciide_channel_dma_setup(cp); 4848 4849 for (drive = 0; drive < 2; drive++) { 4850 regtim = SIS_TIM133( 4851 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 4852 chp->channel, drive); 4853 drvp = &chp->ch_drive[drive]; 4854 /* If no drive, skip */ 4855 if ((drvp->drive_flags & DRIVE) == 0) 4856 continue; 4857 /* add timing values, setup DMA if needed */ 4858 if (drvp->drive_flags & DRIVE_UDMA) { 4859 /* use Ultra/DMA */ 4860 drvp->drive_flags &= ~DRIVE_DMA; 4861 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 4862 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 4863 if (drvp->UDMA_mode > 2) 4864 drvp->UDMA_mode = 2; 4865 } 4866 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 4867 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 4868 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4869 } else if (drvp->drive_flags & DRIVE_DMA) { 4870 /* 4871 * use Multiword DMA 4872 * Timings will be used for both PIO and DMA, 4873 * so adjust DMA mode if needed 4874 */ 4875 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 4876 drvp->PIO_mode = drvp->DMA_mode + 2; 4877 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 4878 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
4879 drvp->PIO_mode - 2 : 0; 4880 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 4881 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4882 } else { 4883 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 4884 } 4885 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 4886 "channel %d drive %d: 0x%x (reg 0x%x)\n", 4887 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 4888 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 4889 } 4890 if (idedma_ctl != 0) { 4891 /* Add software bits in status register */ 4892 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4893 IDEDMA_CTL(chp->channel), idedma_ctl); 4894 } 4895 pciide_print_modes(cp); 4896 } 4897 4898 void 4899 sis_setup_channel(struct channel_softc *chp) 4900 { 4901 struct ata_drive_datas *drvp; 4902 int drive; 4903 u_int32_t sis_tim; 4904 u_int32_t idedma_ctl; 4905 struct pciide_channel *cp = (struct pciide_channel *)chp; 4906 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4907 struct pciide_sis *sis = sc->sc_cookie; 4908 4909 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 4910 "channel %d 0x%x\n", chp->channel, 4911 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 4912 DEBUG_PROBE); 4913 sis_tim = 0; 4914 idedma_ctl = 0; 4915 /* setup DMA if needed */ 4916 pciide_channel_dma_setup(cp); 4917 4918 for (drive = 0; drive < 2; drive++) { 4919 drvp = &chp->ch_drive[drive]; 4920 /* If no drive, skip */ 4921 if ((drvp->drive_flags & DRIVE) == 0) 4922 continue; 4923 /* add timing values, setup DMA if needed */ 4924 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 4925 (drvp->drive_flags & DRIVE_UDMA) == 0) 4926 goto pio; 4927 4928 if (drvp->drive_flags & DRIVE_UDMA) { 4929 /* use Ultra/DMA */ 4930 drvp->drive_flags &= ~DRIVE_DMA; 4931 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 4932 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 4933 if (drvp->UDMA_mode > 2) 4934 drvp->UDMA_mode = 2; 4935 } 4936 switch (sis->sis_type) { 4937 case SIS_TYPE_66: 4938 case SIS_TYPE_100OLD: 4939 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 4940 SIS_TIM66_UDMA_TIME_OFF(drive); 4941 break; 4942 case SIS_TYPE_100NEW: 4943 sis_tim |= 4944 sis_udma100new_tim[drvp->UDMA_mode] << 4945 SIS_TIM100_UDMA_TIME_OFF(drive); 4946 break; 4947 case SIS_TYPE_133OLD: 4948 sis_tim |= 4949 sis_udma133old_tim[drvp->UDMA_mode] << 4950 SIS_TIM100_UDMA_TIME_OFF(drive); 4951 break; 4952 default: 4953 printf("unknown SiS IDE type %d\n", 4954 sis->sis_type); 4955 } 4956 } else { 4957 /* 4958 * use Multiword DMA 4959 * Timings will be used for both PIO and DMA, 4960 * so adjust DMA mode if needed 4961 */ 4962 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 4963 drvp->PIO_mode = drvp->DMA_mode + 2; 4964 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 4965 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
4966 drvp->PIO_mode - 2 : 0; 4967 if (drvp->DMA_mode == 0) 4968 drvp->PIO_mode = 0; 4969 } 4970 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4971 pio: switch (sis->sis_type) { 4972 case SIS_TYPE_NOUDMA: 4973 case SIS_TYPE_66: 4974 case SIS_TYPE_100OLD: 4975 sis_tim |= sis_pio_act[drvp->PIO_mode] << 4976 SIS_TIM66_ACT_OFF(drive); 4977 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 4978 SIS_TIM66_REC_OFF(drive); 4979 break; 4980 case SIS_TYPE_100NEW: 4981 case SIS_TYPE_133OLD: 4982 sis_tim |= sis_pio_act[drvp->PIO_mode] << 4983 SIS_TIM100_ACT_OFF(drive); 4984 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 4985 SIS_TIM100_REC_OFF(drive); 4986 break; 4987 default: 4988 printf("unknown SiS IDE type %d\n", 4989 sis->sis_type); 4990 } 4991 } 4992 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 4993 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 4994 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 4995 if (idedma_ctl != 0) { 4996 /* Add software bits in status register */ 4997 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4998 IDEDMA_CTL(chp->channel), idedma_ctl); 4999 } 5000 pciide_print_modes(cp); 5001 } 5002 5003 void 5004 natsemi_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5005 { 5006 struct pciide_channel *cp; 5007 int channel; 5008 pcireg_t interface, ctl; 5009 bus_size_t cmdsize, ctlsize; 5010 5011 printf(": DMA"); 5012 pciide_mapreg_dma(sc, pa); 5013 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 5014 5015 if (sc->sc_dma_ok) { 5016 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5017 sc->sc_wdcdev.irqack = natsemi_irqack; 5018 } 5019 5020 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 5021 5022 /* 5023 * Mask off interrupts from both channels, appropriate channel(s) 5024 * will be unmasked later. 5025 */ 5026 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5027 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 5028 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 5029 5030 sc->sc_wdcdev.PIO_cap = 4; 5031 sc->sc_wdcdev.DMA_cap = 2; 5032 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 5033 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5034 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5035 5036 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5037 PCI_CLASS_REG)); 5038 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 5039 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5040 5041 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
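Channels wired to compatibility mode deliver their interrupts on the legacy ISA IRQ lines instead.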
*/ 5042 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 5043 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 5044 ctl &= ~NATSEMI_CTRL1_INTAMASK; 5045 else 5046 ctl |= NATSEMI_CTRL1_INTAMASK; 5047 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 5048 5049 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5050 cp = &sc->pciide_channels[channel]; 5051 if (pciide_chansetup(sc, channel, interface) == 0) 5052 continue; 5053 5054 pciide_map_compat_intr(pa, cp, channel, interface); 5055 if (cp->hw_ok == 0) 5056 continue; 5057 5058 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5059 natsemi_pci_intr); 5060 if (cp->hw_ok == 0) { 5061 pciide_unmap_compat_intr(pa, cp, channel, interface); 5062 continue; 5063 } 5064 natsemi_setup_channel(&cp->wdc_channel); 5065 } 5066 } 5067 5068 void 5069 natsemi_setup_channel(struct channel_softc *chp) 5070 { 5071 struct ata_drive_datas *drvp; 5072 int drive, ndrives = 0; 5073 u_int32_t idedma_ctl = 0; 5074 struct pciide_channel *cp = (struct pciide_channel *)chp; 5075 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5076 u_int8_t tim; 5077 5078 /* setup DMA if needed */ 5079 pciide_channel_dma_setup(cp); 5080 5081 for (drive = 0; drive < 2; drive++) { 5082 drvp = &chp->ch_drive[drive]; 5083 /* If no drive, skip */ 5084 if ((drvp->drive_flags & DRIVE) == 0) 5085 continue; 5086 5087 ndrives++; 5088 /* add timing values, setup DMA if needed */ 5089 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 5090 tim = natsemi_pio_pulse[drvp->PIO_mode] | 5091 (natsemi_pio_recover[drvp->PIO_mode] << 4); 5092 } else { 5093 /* 5094 * use Multiword DMA 5095 * Timings will be used for both PIO and DMA, 5096 * so adjust DMA mode if needed 5097 */ 5098 if (drvp->PIO_mode >= 3 && 5099 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5100 drvp->DMA_mode = drvp->PIO_mode - 2; 5101 } 5102 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5103 tim = natsemi_dma_pulse[drvp->DMA_mode] | 5104 (natsemi_dma_recover[drvp->DMA_mode] << 4); 5105 } 5106 5107 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5108 NATSEMI_RTREG(chp->channel, drive), tim); 5109 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5110 NATSEMI_WTREG(chp->channel, drive), tim); 5111 } 5112 if (idedma_ctl != 0) { 5113 /* Add software bits in status register */ 5114 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5115 IDEDMA_CTL(chp->channel), idedma_ctl); 5116 } 5117 if (ndrives > 0) { 5118 /* Unmask the channel if at least one drive is found */ 5119 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5120 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 5121 ~(NATSEMI_CHMASK(chp->channel))); 5122 } 5123 5124 pciide_print_modes(cp); 5125 5126 /* Go ahead and ack interrupts generated during probe. 
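The error and interrupt bits in IDEDMA_CTL are write-one-to-clear, so writing back the value just read clears any that are set.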
*/
5127 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5128 IDEDMA_CTL(chp->channel),
5129 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5130 IDEDMA_CTL(chp->channel)));
5131 }
5132
5133 void
5134 natsemi_irqack(struct channel_softc *chp)
5135 {
5136 struct pciide_channel *cp = (struct pciide_channel *)chp;
5137 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5138 u_int8_t clr;
5139
5140 /* The "clear" bits are in the wrong register *sigh* */
5141 clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5142 IDEDMA_CMD(chp->channel));
5143 clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5144 IDEDMA_CTL(chp->channel)) &
5145 (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR);
5146 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5147 IDEDMA_CMD(chp->channel), clr);
5148 }
5149
5150 int
5151 natsemi_pci_intr(void *arg)
5152 {
5153 struct pciide_softc *sc = arg;
5154 struct pciide_channel *cp;
5155 struct channel_softc *wdc_cp;
5156 int i, rv, crv;
5157 u_int8_t msk;
5158
5159 rv = 0;
5160 msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2);
5161 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5162 cp = &sc->pciide_channels[i];
5163 wdc_cp = &cp->wdc_channel;
5164
5165 /* If a compat channel, skip. */
5166 if (cp->compat)
5167 continue;
5168
5169 /* If this channel is masked, skip it. */
5170 if (msk & NATSEMI_CHMASK(i))
5171 continue;
5172
5173 if (pciide_intr_flag(cp) == 0)
5174 continue;
5175
5176 crv = wdcintr(wdc_cp);
5177 if (crv == 0)
5178 ; /* leave rv alone */
5179 else if (crv == 1)
5180 rv = 1; /* claim the intr */
5181 else if (rv == 0) /* crv should be -1 in this case */
5182 rv = crv; /* if we've done no better, take it */
5183 }
5184 return (rv);
5185 }
5186
5187 void
5188 ns_scx200_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
5189 {
5190 struct pciide_channel *cp;
5191 int channel;
5192 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
5193 bus_size_t cmdsize, ctlsize;
5194
5195 printf(": DMA");
5196 pciide_mapreg_dma(sc, pa);
5197
5198 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5199 WDC_CAPABILITY_MODE;
5200 if (sc->sc_dma_ok) {
5201 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5202 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5203 sc->sc_wdcdev.irqack = pciide_irqack;
5204 }
5205 sc->sc_wdcdev.PIO_cap = 4;
5206 sc->sc_wdcdev.DMA_cap = 2;
5207 sc->sc_wdcdev.UDMA_cap = 2;
5208
5209 sc->sc_wdcdev.set_modes = ns_scx200_setup_channel;
5210 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5211 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5212
5213 /*
5214 * Soekris net4801 errata 0003:
5215 *
5216 * The SC1100's built-in busmaster IDE controller is pretty standard,
5217 * but has two bugs: data transfers need to be dword aligned and
5218 * it cannot do an exact 64Kbyte data transfer.
5219 *
5220 * Assume that reducing the maximum segment size by one page
5221 * will be enough, and restrict boundary too for extra certainty.
5222 */
5223 if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SCx200_IDE) {
5224 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE;
5225 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE;
5226 }
5227
5228 /*
5229 * This chip seems to be unable to do one-sector transfers
5230 * using DMA.
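 * The WDC_QUIRK_NOSHORTDMA flag set below is meant to keep the wdc
 * layer from using DMA for such short transfers.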
5231 */ 5232 sc->sc_wdcdev.quirks = WDC_QUIRK_NOSHORTDMA; 5233 5234 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5235 5236 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5237 cp = &sc->pciide_channels[channel]; 5238 if (pciide_chansetup(sc, channel, interface) == 0) 5239 continue; 5240 pciide_map_compat_intr(pa, cp, channel, interface); 5241 if (cp->hw_ok == 0) 5242 continue; 5243 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5244 pciide_pci_intr); 5245 if (cp->hw_ok == 0) { 5246 pciide_unmap_compat_intr(pa, cp, channel, interface); 5247 continue; 5248 } 5249 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5250 } 5251 } 5252 5253 void 5254 ns_scx200_setup_channel(struct channel_softc *chp) 5255 { 5256 struct ata_drive_datas *drvp; 5257 int drive, mode; 5258 u_int32_t idedma_ctl; 5259 struct pciide_channel *cp = (struct pciide_channel*)chp; 5260 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5261 int channel = chp->channel; 5262 int pioformat; 5263 pcireg_t piotim, dmatim; 5264 5265 /* Setup DMA if needed */ 5266 pciide_channel_dma_setup(cp); 5267 5268 idedma_ctl = 0; 5269 5270 pioformat = (pci_conf_read(sc->sc_pc, sc->sc_tag, 5271 SCx200_TIM_DMA(0, 0)) >> SCx200_PIOFORMAT_SHIFT) & 0x01; 5272 WDCDEBUG_PRINT(("%s: pio format %d\n", __func__, pioformat), 5273 DEBUG_PROBE); 5274 5275 /* Per channel settings */ 5276 for (drive = 0; drive < 2; drive++) { 5277 drvp = &chp->ch_drive[drive]; 5278 5279 /* If no drive, skip */ 5280 if ((drvp->drive_flags & DRIVE) == 0) 5281 continue; 5282 5283 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5284 SCx200_TIM_PIO(channel, drive)); 5285 dmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5286 SCx200_TIM_DMA(channel, drive)); 5287 WDCDEBUG_PRINT(("%s:%d:%d: piotim=0x%x, dmatim=0x%x\n", 5288 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5289 piotim, dmatim), DEBUG_PROBE); 5290 5291 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 5292 (drvp->drive_flags & DRIVE_UDMA) != 0) { 5293 /* Setup UltraDMA mode */ 5294 drvp->drive_flags &= ~DRIVE_DMA; 5295 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5296 dmatim = scx200_udma33[drvp->UDMA_mode]; 5297 mode = drvp->PIO_mode; 5298 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 5299 (drvp->drive_flags & DRIVE_DMA) != 0) { 5300 /* Setup multiword DMA mode */ 5301 drvp->drive_flags &= ~DRIVE_UDMA; 5302 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5303 dmatim = scx200_dma33[drvp->DMA_mode]; 5304 5305 /* mode = min(pio, dma + 2) */ 5306 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 5307 mode = drvp->PIO_mode; 5308 else 5309 mode = drvp->DMA_mode + 2; 5310 } else { 5311 mode = drvp->PIO_mode; 5312 } 5313 5314 /* Setup PIO mode */ 5315 drvp->PIO_mode = mode; 5316 if (mode < 2) 5317 drvp->DMA_mode = 0; 5318 else 5319 drvp->DMA_mode = mode - 2; 5320 5321 piotim = scx200_pio33[pioformat][drvp->PIO_mode]; 5322 5323 WDCDEBUG_PRINT(("%s:%d:%d: new piotim=0x%x, dmatim=0x%x\n", 5324 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5325 piotim, dmatim), DEBUG_PROBE); 5326 5327 pci_conf_write(sc->sc_pc, sc->sc_tag, 5328 SCx200_TIM_PIO(channel, drive), piotim); 5329 pci_conf_write(sc->sc_pc, sc->sc_tag, 5330 SCx200_TIM_DMA(channel, drive), dmatim); 5331 } 5332 5333 if (idedma_ctl != 0) { 5334 /* Add software bits in status register */ 5335 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5336 IDEDMA_CTL(channel), idedma_ctl); 5337 } 5338 5339 pciide_print_modes(cp); 5340 } 5341 5342 void 5343 acer_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5344 { 5345 struct 
pciide_channel *cp; 5346 int channel; 5347 pcireg_t cr, interface; 5348 bus_size_t cmdsize, ctlsize; 5349 int rev = sc->sc_rev; 5350 5351 printf(": DMA"); 5352 pciide_mapreg_dma(sc, pa); 5353 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5354 WDC_CAPABILITY_MODE; 5355 5356 if (sc->sc_dma_ok) { 5357 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 5358 if (rev >= 0x20) { 5359 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5360 if (rev >= 0xC4) 5361 sc->sc_wdcdev.UDMA_cap = 5; 5362 else if (rev >= 0xC2) 5363 sc->sc_wdcdev.UDMA_cap = 4; 5364 else 5365 sc->sc_wdcdev.UDMA_cap = 2; 5366 } 5367 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5368 sc->sc_wdcdev.irqack = pciide_irqack; 5369 } 5370 5371 sc->sc_wdcdev.PIO_cap = 4; 5372 sc->sc_wdcdev.DMA_cap = 2; 5373 sc->sc_wdcdev.set_modes = acer_setup_channel; 5374 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5375 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5376 5377 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 5378 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 5379 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 5380 5381 /* Enable "microsoft register bits" R/W. */ 5382 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 5383 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 5384 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 5385 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 5386 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 5387 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 5388 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 5389 ~ACER_CHANSTATUSREGS_RO); 5390 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 5391 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 5392 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 5393 /* Don't use cr, re-read the real register content instead */ 5394 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5395 PCI_CLASS_REG)); 5396 5397 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5398 5399 /* From linux: enable "Cable Detection" */ 5400 if (rev >= 0xC2) 5401 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 5402 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 5403 | ACER_0x4B_CDETECT); 5404 5405 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5406 cp = &sc->pciide_channels[channel]; 5407 if (pciide_chansetup(sc, channel, interface) == 0) 5408 continue; 5409 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 5410 printf("%s: %s ignored (disabled)\n", 5411 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5412 continue; 5413 } 5414 pciide_map_compat_intr(pa, cp, channel, interface); 5415 if (cp->hw_ok == 0) 5416 continue; 5417 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5418 (rev >= 0xC2) ? 
pciide_pci_intr : acer_pci_intr); 5419 if (cp->hw_ok == 0) { 5420 pciide_unmap_compat_intr(pa, cp, channel, interface); 5421 continue; 5422 } 5423 if (pciide_chan_candisable(cp)) { 5424 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 5425 pci_conf_write(sc->sc_pc, sc->sc_tag, 5426 PCI_CLASS_REG, cr); 5427 } 5428 if (cp->hw_ok == 0) { 5429 pciide_unmap_compat_intr(pa, cp, channel, interface); 5430 continue; 5431 } 5432 acer_setup_channel(&cp->wdc_channel); 5433 } 5434 } 5435 5436 void 5437 acer_setup_channel(struct channel_softc *chp) 5438 { 5439 struct ata_drive_datas *drvp; 5440 int drive; 5441 u_int32_t acer_fifo_udma; 5442 u_int32_t idedma_ctl; 5443 struct pciide_channel *cp = (struct pciide_channel *)chp; 5444 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5445 5446 idedma_ctl = 0; 5447 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 5448 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 5449 acer_fifo_udma), DEBUG_PROBE); 5450 /* setup DMA if needed */ 5451 pciide_channel_dma_setup(cp); 5452 5453 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 5454 DRIVE_UDMA) { /* check 80 pins cable */ 5455 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 5456 ACER_0x4A_80PIN(chp->channel)) { 5457 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 5458 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 5459 DEBUG_PROBE); 5460 if (chp->ch_drive[0].UDMA_mode > 2) 5461 chp->ch_drive[0].UDMA_mode = 2; 5462 if (chp->ch_drive[1].UDMA_mode > 2) 5463 chp->ch_drive[1].UDMA_mode = 2; 5464 } 5465 } 5466 5467 for (drive = 0; drive < 2; drive++) { 5468 drvp = &chp->ch_drive[drive]; 5469 /* If no drive, skip */ 5470 if ((drvp->drive_flags & DRIVE) == 0) 5471 continue; 5472 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 5473 "channel %d drive %d 0x%x\n", chp->channel, drive, 5474 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5475 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 5476 /* clear FIFO/DMA mode */ 5477 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 5478 ACER_UDMA_EN(chp->channel, drive) | 5479 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 5480 5481 /* add timing values, setup DMA if needed */ 5482 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5483 (drvp->drive_flags & DRIVE_UDMA) == 0) { 5484 acer_fifo_udma |= 5485 ACER_FTH_OPL(chp->channel, drive, 0x1); 5486 goto pio; 5487 } 5488 5489 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 5490 if (drvp->drive_flags & DRIVE_UDMA) { 5491 /* use Ultra/DMA */ 5492 drvp->drive_flags &= ~DRIVE_DMA; 5493 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 5494 acer_fifo_udma |= 5495 ACER_UDMA_TIM(chp->channel, drive, 5496 acer_udma[drvp->UDMA_mode]); 5497 /* XXX disable if one drive < UDMA3 ? */ 5498 if (drvp->UDMA_mode >= 3) { 5499 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5500 ACER_0x4B, 5501 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5502 ACER_0x4B) | ACER_0x4B_UDMA66); 5503 } 5504 } else { 5505 /* 5506 * use Multiword DMA 5507 * Timings will be used for both PIO and DMA, 5508 * so adjust DMA mode if needed 5509 */ 5510 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5511 drvp->PIO_mode = drvp->DMA_mode + 2; 5512 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5513 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5514 drvp->PIO_mode - 2 : 0; 5515 if (drvp->DMA_mode == 0) 5516 drvp->PIO_mode = 0; 5517 } 5518 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5519 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 5520 ACER_IDETIM(chp->channel, drive), 5521 acer_pio[drvp->PIO_mode]); 5522 } 5523 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 5524 acer_fifo_udma), DEBUG_PROBE); 5525 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 5526 if (idedma_ctl != 0) { 5527 /* Add software bits in status register */ 5528 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5529 IDEDMA_CTL(chp->channel), idedma_ctl); 5530 } 5531 pciide_print_modes(cp); 5532 } 5533 5534 int 5535 acer_pci_intr(void *arg) 5536 { 5537 struct pciide_softc *sc = arg; 5538 struct pciide_channel *cp; 5539 struct channel_softc *wdc_cp; 5540 int i, rv, crv; 5541 u_int32_t chids; 5542 5543 rv = 0; 5544 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 5545 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5546 cp = &sc->pciide_channels[i]; 5547 wdc_cp = &cp->wdc_channel; 5548 /* If a compat channel skip. */ 5549 if (cp->compat) 5550 continue; 5551 if (chids & ACER_CHIDS_INT(i)) { 5552 crv = wdcintr(wdc_cp); 5553 if (crv == 0) 5554 printf("%s:%d: bogus intr\n", 5555 sc->sc_wdcdev.sc_dev.dv_xname, i); 5556 else 5557 rv = 1; 5558 } 5559 } 5560 return (rv); 5561 } 5562 5563 void 5564 hpt_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5565 { 5566 struct pciide_channel *cp; 5567 int i, compatchan, revision; 5568 pcireg_t interface; 5569 bus_size_t cmdsize, ctlsize; 5570 5571 revision = sc->sc_rev; 5572 5573 /* 5574 * when the chip is in native mode it identifies itself as a 5575 * 'misc mass storage'. Fake interface in this case. 5576 */ 5577 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 5578 interface = PCI_INTERFACE(pa->pa_class); 5579 } else { 5580 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 5581 PCIIDE_INTERFACE_PCI(0); 5582 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5583 (revision == HPT370_REV || revision == HPT370A_REV || 5584 revision == HPT372_REV)) || 5585 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5586 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5587 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5588 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5589 interface |= PCIIDE_INTERFACE_PCI(1); 5590 } 5591 5592 printf(": DMA"); 5593 pciide_mapreg_dma(sc, pa); 5594 printf("\n"); 5595 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5596 WDC_CAPABILITY_MODE; 5597 if (sc->sc_dma_ok) { 5598 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5599 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5600 sc->sc_wdcdev.irqack = pciide_irqack; 5601 } 5602 sc->sc_wdcdev.PIO_cap = 4; 5603 sc->sc_wdcdev.DMA_cap = 2; 5604 5605 sc->sc_wdcdev.set_modes = hpt_setup_channel; 5606 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5607 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5608 revision == HPT366_REV) { 5609 sc->sc_wdcdev.UDMA_cap = 4; 5610 /* 5611 * The 366 has 2 PCI IDE functions, one for primary and one 5612 * for secondary. 
	 * So we need to call pciide_mapregs_compat()
	 * with the real channel
	 */
		if (pa->pa_function == 0) {
			compatchan = 0;
		} else if (pa->pa_function == 1) {
			compatchan = 1;
		} else {
			printf("%s: unexpected PCI function %d\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
			return;
		}
		sc->sc_wdcdev.nchannels = 1;
	} else {
		sc->sc_wdcdev.nchannels = 2;
		if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
			sc->sc_wdcdev.UDMA_cap = 6;
		else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
			if (revision == HPT372_REV)
				sc->sc_wdcdev.UDMA_cap = 6;
			else
				sc->sc_wdcdev.UDMA_cap = 5;
		}
	}
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		if (sc->sc_wdcdev.nchannels > 1) {
			compatchan = i;
			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
			    HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
				printf("%s: %s ignored (disabled)\n",
				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
				continue;
			}
		}
		if (pciide_chansetup(sc, i, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(i)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, hpt_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
			    &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			return;
		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
		wdcattach(&cp->wdc_channel);
		hpt_setup_channel(&cp->wdc_channel);
	}
	if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
	    (revision == HPT370_REV || revision == HPT370A_REV ||
	    revision == HPT372_REV)) ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
		/*
		 * Turn off fast interrupts
		 */
		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0),
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) &
		    ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ));
		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1),
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) &
		    ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ));

		/*
		 * The HPT370 and higher have a bit to disable interrupts;
		 * make sure to clear it.
		 */
		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
		    ~HPT_CSEL_IRQDIS);
	}
	/* set clocks, etc (mandatory on 372/4, optional otherwise) */
	if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
	    (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
	    revision == HPT372_REV))
		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
		    (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
		    HPT_SC2_MAEN) | HPT_SC2_OSC_EN);

	return;
}

void
hpt_setup_channel(struct channel_softc *chp)
{
	struct ata_drive_datas *drvp;
	int drive;
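	/*
	 * Note (summarizing the code below): `cable' caches the HPT_CSEL
	 * register; the per-drive loop tests HPT_CSEL_CBLID() and caps
	 * UDMA at mode 2 when no 80-wire cable is reported for the
	 * channel.
	 */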
5710 int cable; 5711 u_int32_t before, after; 5712 u_int32_t idedma_ctl; 5713 struct pciide_channel *cp = (struct pciide_channel *)chp; 5714 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5715 int revision = sc->sc_rev; 5716 u_int32_t *tim_pio, *tim_dma, *tim_udma; 5717 5718 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 5719 5720 /* setup DMA if needed */ 5721 pciide_channel_dma_setup(cp); 5722 5723 idedma_ctl = 0; 5724 5725 switch (sc->sc_pp->ide_product) { 5726 case PCI_PRODUCT_TRIONES_HPT366: 5727 if (revision == HPT370_REV || 5728 revision == HPT370A_REV) { 5729 tim_pio = hpt370_pio; 5730 tim_dma = hpt370_dma; 5731 tim_udma = hpt370_udma; 5732 } else if (revision == HPT372_REV) { 5733 tim_pio = hpt372_pio; 5734 tim_dma = hpt372_dma; 5735 tim_udma = hpt372_udma; 5736 } else { 5737 tim_pio = hpt366_pio; 5738 tim_dma = hpt366_dma; 5739 tim_udma = hpt366_udma; 5740 } 5741 break; 5742 case PCI_PRODUCT_TRIONES_HPT372A: 5743 case PCI_PRODUCT_TRIONES_HPT302: 5744 case PCI_PRODUCT_TRIONES_HPT371: 5745 tim_pio = hpt372_pio; 5746 tim_dma = hpt372_dma; 5747 tim_udma = hpt372_udma; 5748 break; 5749 case PCI_PRODUCT_TRIONES_HPT374: 5750 tim_pio = hpt374_pio; 5751 tim_dma = hpt374_dma; 5752 tim_udma = hpt374_udma; 5753 break; 5754 default: 5755 printf("%s: no known timing values\n", 5756 sc->sc_wdcdev.sc_dev.dv_xname); 5757 goto end; 5758 } 5759 5760 /* Per drive settings */ 5761 for (drive = 0; drive < 2; drive++) { 5762 drvp = &chp->ch_drive[drive]; 5763 /* If no drive, skip */ 5764 if ((drvp->drive_flags & DRIVE) == 0) 5765 continue; 5766 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 5767 HPT_IDETIM(chp->channel, drive)); 5768 5769 /* add timing values, setup DMA if needed */ 5770 if (drvp->drive_flags & DRIVE_UDMA) { 5771 /* use Ultra/DMA */ 5772 drvp->drive_flags &= ~DRIVE_DMA; 5773 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 5774 drvp->UDMA_mode > 2) { 5775 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 5776 "cable not detected\n", drvp->drive_name, 5777 sc->sc_wdcdev.sc_dev.dv_xname, 5778 chp->channel, drive), DEBUG_PROBE); 5779 drvp->UDMA_mode = 2; 5780 } 5781 after = tim_udma[drvp->UDMA_mode]; 5782 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5783 } else if (drvp->drive_flags & DRIVE_DMA) { 5784 /* 5785 * use Multiword DMA. 
5786 * Timings will be used for both PIO and DMA, so adjust 5787 * DMA mode if needed 5788 */ 5789 if (drvp->PIO_mode >= 3 && 5790 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5791 drvp->DMA_mode = drvp->PIO_mode - 2; 5792 } 5793 after = tim_dma[drvp->DMA_mode]; 5794 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5795 } else { 5796 /* PIO only */ 5797 after = tim_pio[drvp->PIO_mode]; 5798 } 5799 pci_conf_write(sc->sc_pc, sc->sc_tag, 5800 HPT_IDETIM(chp->channel, drive), after); 5801 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 5802 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 5803 after, before), DEBUG_PROBE); 5804 } 5805 end: 5806 if (idedma_ctl != 0) { 5807 /* Add software bits in status register */ 5808 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5809 IDEDMA_CTL(chp->channel), idedma_ctl); 5810 } 5811 pciide_print_modes(cp); 5812 } 5813 5814 int 5815 hpt_pci_intr(void *arg) 5816 { 5817 struct pciide_softc *sc = arg; 5818 struct pciide_channel *cp; 5819 struct channel_softc *wdc_cp; 5820 int rv = 0; 5821 int dmastat, i, crv; 5822 5823 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5824 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5825 IDEDMA_CTL(i)); 5826 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 5827 IDEDMA_CTL_INTR) 5828 continue; 5829 cp = &sc->pciide_channels[i]; 5830 wdc_cp = &cp->wdc_channel; 5831 crv = wdcintr(wdc_cp); 5832 if (crv == 0) { 5833 printf("%s:%d: bogus intr\n", 5834 sc->sc_wdcdev.sc_dev.dv_xname, i); 5835 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5836 IDEDMA_CTL(i), dmastat); 5837 } else 5838 rv = 1; 5839 } 5840 return (rv); 5841 } 5842 5843 /* Macros to test product */ 5844 #define PDC_IS_262(sc) \ 5845 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 5846 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 5847 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 5848 #define PDC_IS_265(sc) \ 5849 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 5850 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 5851 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 5852 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 5853 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 5854 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 5855 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 5856 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 5857 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 5858 #define PDC_IS_268(sc) \ 5859 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 5860 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 5861 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 5862 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 5863 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 5864 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 5865 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 5866 #define PDC_IS_269(sc) \ 5867 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 5868 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 5869 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 5870 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 5871 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 5872 5873 u_int8_t 5874 pdc268_config_read(struct channel_softc *chp, int index) 5875 { 5876 struct pciide_channel *cp = (struct pciide_channel *)chp; 5877 struct 
pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5878 int channel = chp->channel; 5879 5880 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5881 PDC268_INDEX(channel), index); 5882 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5883 PDC268_DATA(channel))); 5884 } 5885 5886 void 5887 pdc202xx_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5888 { 5889 struct pciide_channel *cp; 5890 int channel; 5891 pcireg_t interface, st, mode; 5892 bus_size_t cmdsize, ctlsize; 5893 5894 if (!PDC_IS_268(sc)) { 5895 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 5896 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 5897 st), DEBUG_PROBE); 5898 } 5899 5900 /* turn off RAID mode */ 5901 if (!PDC_IS_268(sc)) 5902 st &= ~PDC2xx_STATE_IDERAID; 5903 5904 /* 5905 * can't rely on the PCI_CLASS_REG content if the chip was in raid 5906 * mode. We have to fake interface 5907 */ 5908 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 5909 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 5910 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 5911 5912 printf(": DMA"); 5913 pciide_mapreg_dma(sc, pa); 5914 5915 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5916 WDC_CAPABILITY_MODE; 5917 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 5918 PDC_IS_262(sc)) 5919 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 5920 if (sc->sc_dma_ok) { 5921 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5922 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5923 sc->sc_wdcdev.irqack = pciide_irqack; 5924 } 5925 sc->sc_wdcdev.PIO_cap = 4; 5926 sc->sc_wdcdev.DMA_cap = 2; 5927 if (PDC_IS_269(sc)) 5928 sc->sc_wdcdev.UDMA_cap = 6; 5929 else if (PDC_IS_265(sc)) 5930 sc->sc_wdcdev.UDMA_cap = 5; 5931 else if (PDC_IS_262(sc)) 5932 sc->sc_wdcdev.UDMA_cap = 4; 5933 else 5934 sc->sc_wdcdev.UDMA_cap = 2; 5935 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
5936 pdc20268_setup_channel : pdc202xx_setup_channel; 5937 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5938 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5939 5940 if (PDC_IS_262(sc)) { 5941 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 5942 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 5943 } 5944 5945 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5946 if (!PDC_IS_268(sc)) { 5947 /* setup failsafe defaults */ 5948 mode = 0; 5949 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 5950 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 5951 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 5952 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 5953 for (channel = 0; 5954 channel < sc->sc_wdcdev.nchannels; 5955 channel++) { 5956 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 5957 "drive 0 initial timings 0x%x, now 0x%x\n", 5958 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 5959 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 5960 DEBUG_PROBE); 5961 pci_conf_write(sc->sc_pc, sc->sc_tag, 5962 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 5963 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 5964 "drive 1 initial timings 0x%x, now 0x%x\n", 5965 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 5966 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 5967 pci_conf_write(sc->sc_pc, sc->sc_tag, 5968 PDC2xx_TIM(channel, 1), mode); 5969 } 5970 5971 mode = PDC2xx_SCR_DMA; 5972 if (PDC_IS_262(sc)) { 5973 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 5974 } else { 5975 /* the BIOS set it up this way */ 5976 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 5977 } 5978 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 5979 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 5980 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 5981 "now 0x%x\n", 5982 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 5983 PDC2xx_SCR), 5984 mode), DEBUG_PROBE); 5985 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 5986 PDC2xx_SCR, mode); 5987 5988 /* controller initial state register is OK even without BIOS */ 5989 /* Set DMA mode to IDE DMA compatibility */ 5990 mode = 5991 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 5992 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 5993 DEBUG_PROBE); 5994 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 5995 mode | 0x1); 5996 mode = 5997 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 5998 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 5999 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 6000 mode | 0x1); 6001 } 6002 6003 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6004 cp = &sc->pciide_channels[channel]; 6005 if (pciide_chansetup(sc, channel, interface) == 0) 6006 continue; 6007 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 6008 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 6009 printf("%s: %s ignored (disabled)\n", 6010 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6011 continue; 6012 } 6013 pciide_map_compat_intr(pa, cp, channel, interface); 6014 if (cp->hw_ok == 0) 6015 continue; 6016 if (PDC_IS_265(sc)) 6017 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6018 pdc20265_pci_intr); 6019 else 6020 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6021 pdc202xx_pci_intr); 6022 if (cp->hw_ok == 0) { 6023 pciide_unmap_compat_intr(pa, cp, channel, interface); 6024 continue; 6025 } 6026 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 6027 st &= ~(PDC_IS_262(sc) ? 
6028 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 6029 pciide_unmap_compat_intr(pa, cp, channel, interface); 6030 } 6031 if (PDC_IS_268(sc)) 6032 pdc20268_setup_channel(&cp->wdc_channel); 6033 else 6034 pdc202xx_setup_channel(&cp->wdc_channel); 6035 } 6036 if (!PDC_IS_268(sc)) { 6037 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 6038 "0x%x\n", st), DEBUG_PROBE); 6039 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 6040 } 6041 return; 6042 } 6043 6044 void 6045 pdc202xx_setup_channel(struct channel_softc *chp) 6046 { 6047 struct ata_drive_datas *drvp; 6048 int drive; 6049 pcireg_t mode, st; 6050 u_int32_t idedma_ctl, scr, atapi; 6051 struct pciide_channel *cp = (struct pciide_channel *)chp; 6052 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6053 int channel = chp->channel; 6054 6055 /* setup DMA if needed */ 6056 pciide_channel_dma_setup(cp); 6057 6058 idedma_ctl = 0; 6059 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 6060 sc->sc_wdcdev.sc_dev.dv_xname, 6061 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 6062 DEBUG_PROBE); 6063 6064 /* Per channel settings */ 6065 if (PDC_IS_262(sc)) { 6066 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6067 PDC262_U66); 6068 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6069 /* Check cable */ 6070 if ((st & PDC262_STATE_80P(channel)) != 0 && 6071 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6072 chp->ch_drive[0].UDMA_mode > 2) || 6073 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6074 chp->ch_drive[1].UDMA_mode > 2))) { 6075 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 6076 sc->sc_wdcdev.sc_dev.dv_xname, channel), 6077 DEBUG_PROBE); 6078 if (chp->ch_drive[0].UDMA_mode > 2) 6079 chp->ch_drive[0].UDMA_mode = 2; 6080 if (chp->ch_drive[1].UDMA_mode > 2) 6081 chp->ch_drive[1].UDMA_mode = 2; 6082 } 6083 /* Trim UDMA mode */ 6084 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6085 chp->ch_drive[0].UDMA_mode <= 2) || 6086 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6087 chp->ch_drive[1].UDMA_mode <= 2)) { 6088 if (chp->ch_drive[0].UDMA_mode > 2) 6089 chp->ch_drive[0].UDMA_mode = 2; 6090 if (chp->ch_drive[1].UDMA_mode > 2) 6091 chp->ch_drive[1].UDMA_mode = 2; 6092 } 6093 /* Set U66 if needed */ 6094 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6095 chp->ch_drive[0].UDMA_mode > 2) || 6096 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6097 chp->ch_drive[1].UDMA_mode > 2)) 6098 scr |= PDC262_U66_EN(channel); 6099 else 6100 scr &= ~PDC262_U66_EN(channel); 6101 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6102 PDC262_U66, scr); 6103 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 6104 sc->sc_wdcdev.sc_dev.dv_xname, channel, 6105 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6106 PDC262_ATAPI(channel))), DEBUG_PROBE); 6107 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 6108 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 6109 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6110 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6111 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 6112 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6113 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6114 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 6115 atapi = 0; 6116 else 6117 atapi = PDC262_ATAPI_UDMA; 6118 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6119 PDC262_ATAPI(channel), atapi); 6120 } 6121 } 6122 for (drive = 0; drive < 2; drive++) { 6123 drvp = &chp->ch_drive[drive]; 6124 /* If no drive, skip */ 6125 if ((drvp->drive_flags & 
DRIVE) == 0) 6126 continue; 6127 mode = 0; 6128 if (drvp->drive_flags & DRIVE_UDMA) { 6129 /* use Ultra/DMA */ 6130 drvp->drive_flags &= ~DRIVE_DMA; 6131 mode = PDC2xx_TIM_SET_MB(mode, 6132 pdc2xx_udma_mb[drvp->UDMA_mode]); 6133 mode = PDC2xx_TIM_SET_MC(mode, 6134 pdc2xx_udma_mc[drvp->UDMA_mode]); 6135 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6136 } else if (drvp->drive_flags & DRIVE_DMA) { 6137 mode = PDC2xx_TIM_SET_MB(mode, 6138 pdc2xx_dma_mb[drvp->DMA_mode]); 6139 mode = PDC2xx_TIM_SET_MC(mode, 6140 pdc2xx_dma_mc[drvp->DMA_mode]); 6141 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6142 } else { 6143 mode = PDC2xx_TIM_SET_MB(mode, 6144 pdc2xx_dma_mb[0]); 6145 mode = PDC2xx_TIM_SET_MC(mode, 6146 pdc2xx_dma_mc[0]); 6147 } 6148 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 6149 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 6150 if (drvp->drive_flags & DRIVE_ATA) 6151 mode |= PDC2xx_TIM_PRE; 6152 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 6153 if (drvp->PIO_mode >= 3) { 6154 mode |= PDC2xx_TIM_IORDY; 6155 if (drive == 0) 6156 mode |= PDC2xx_TIM_IORDYp; 6157 } 6158 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 6159 "timings 0x%x\n", 6160 sc->sc_wdcdev.sc_dev.dv_xname, 6161 chp->channel, drive, mode), DEBUG_PROBE); 6162 pci_conf_write(sc->sc_pc, sc->sc_tag, 6163 PDC2xx_TIM(chp->channel, drive), mode); 6164 } 6165 if (idedma_ctl != 0) { 6166 /* Add software bits in status register */ 6167 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6168 IDEDMA_CTL(channel), idedma_ctl); 6169 } 6170 pciide_print_modes(cp); 6171 } 6172 6173 void 6174 pdc20268_setup_channel(struct channel_softc *chp) 6175 { 6176 struct ata_drive_datas *drvp; 6177 int drive, cable; 6178 u_int32_t idedma_ctl; 6179 struct pciide_channel *cp = (struct pciide_channel *)chp; 6180 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6181 int channel = chp->channel; 6182 6183 /* check 80 pins cable */ 6184 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE; 6185 6186 /* setup DMA if needed */ 6187 pciide_channel_dma_setup(cp); 6188 6189 idedma_ctl = 0; 6190 6191 for (drive = 0; drive < 2; drive++) { 6192 drvp = &chp->ch_drive[drive]; 6193 /* If no drive, skip */ 6194 if ((drvp->drive_flags & DRIVE) == 0) 6195 continue; 6196 if (drvp->drive_flags & DRIVE_UDMA) { 6197 /* use Ultra/DMA */ 6198 drvp->drive_flags &= ~DRIVE_DMA; 6199 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6200 if (cable && drvp->UDMA_mode > 2) { 6201 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6202 "cable not detected\n", drvp->drive_name, 6203 sc->sc_wdcdev.sc_dev.dv_xname, 6204 channel, drive), DEBUG_PROBE); 6205 drvp->UDMA_mode = 2; 6206 } 6207 } else if (drvp->drive_flags & DRIVE_DMA) { 6208 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6209 } 6210 } 6211 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */ 6212 if (idedma_ctl != 0) { 6213 /* Add software bits in status register */ 6214 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6215 IDEDMA_CTL(channel), idedma_ctl); 6216 } 6217 pciide_print_modes(cp); 6218 } 6219 6220 int 6221 pdc202xx_pci_intr(void *arg) 6222 { 6223 struct pciide_softc *sc = arg; 6224 struct pciide_channel *cp; 6225 struct channel_softc *wdc_cp; 6226 int i, rv, crv; 6227 u_int32_t scr; 6228 6229 rv = 0; 6230 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 6231 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6232 cp = &sc->pciide_channels[i]; 6233 wdc_cp = &cp->wdc_channel; 6234 /* If a compat channel skip. 
*/ 6235 if (cp->compat) 6236 continue; 6237 if (scr & PDC2xx_SCR_INT(i)) { 6238 crv = wdcintr(wdc_cp); 6239 if (crv == 0) 6240 printf("%s:%d: bogus intr (reg 0x%x)\n", 6241 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 6242 else 6243 rv = 1; 6244 } 6245 } 6246 return (rv); 6247 } 6248 6249 int 6250 pdc20265_pci_intr(void *arg) 6251 { 6252 struct pciide_softc *sc = arg; 6253 struct pciide_channel *cp; 6254 struct channel_softc *wdc_cp; 6255 int i, rv, crv; 6256 u_int32_t dmastat; 6257 6258 rv = 0; 6259 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6260 cp = &sc->pciide_channels[i]; 6261 wdc_cp = &cp->wdc_channel; 6262 /* If a compat channel skip. */ 6263 if (cp->compat) 6264 continue; 6265 6266 /* 6267 * In case of shared IRQ check that the interrupt 6268 * was actually generated by this channel. 6269 * Only check the channel that is enabled. 6270 */ 6271 if (cp->hw_ok && PDC_IS_268(sc)) { 6272 if ((pdc268_config_read(wdc_cp, 6273 0x0b) & PDC268_INTR) == 0) 6274 continue; 6275 } 6276 6277 /* 6278 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 6279 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 6280 * So use it instead (requires 2 reg reads instead of 1, 6281 * but we can't do it another way). 6282 */ 6283 dmastat = bus_space_read_1(sc->sc_dma_iot, 6284 sc->sc_dma_ioh, IDEDMA_CTL(i)); 6285 if ((dmastat & IDEDMA_CTL_INTR) == 0) 6286 continue; 6287 6288 crv = wdcintr(wdc_cp); 6289 if (crv == 0) 6290 printf("%s:%d: bogus intr\n", 6291 sc->sc_wdcdev.sc_dev.dv_xname, i); 6292 else 6293 rv = 1; 6294 } 6295 return (rv); 6296 } 6297 6298 void 6299 pdc20262_dma_start(void *v, int channel, int drive) 6300 { 6301 struct pciide_softc *sc = v; 6302 struct pciide_dma_maps *dma_maps = 6303 &sc->pciide_channels[channel].dma_maps[drive]; 6304 u_int8_t clock; 6305 u_int32_t count; 6306 6307 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6308 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6309 PDC262_U66); 6310 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6311 PDC262_U66, clock | PDC262_U66_EN(channel)); 6312 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 6313 count |= dma_maps->dma_flags & WDC_DMA_READ ? 
6314 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 6315 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6316 PDC262_ATAPI(channel), count); 6317 } 6318 6319 pciide_dma_start(v, channel, drive); 6320 } 6321 6322 int 6323 pdc20262_dma_finish(void *v, int channel, int drive, int force) 6324 { 6325 struct pciide_softc *sc = v; 6326 struct pciide_dma_maps *dma_maps = 6327 &sc->pciide_channels[channel].dma_maps[drive]; 6328 u_int8_t clock; 6329 6330 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6331 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6332 PDC262_U66); 6333 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6334 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 6335 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6336 PDC262_ATAPI(channel), 0); 6337 } 6338 6339 return (pciide_dma_finish(v, channel, drive, force)); 6340 } 6341 6342 void 6343 pdcsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6344 { 6345 struct pciide_channel *cp; 6346 struct channel_softc *wdc_cp; 6347 struct pciide_pdcsata *ps; 6348 int channel, i; 6349 bus_size_t dmasize; 6350 pci_intr_handle_t intrhandle; 6351 const char *intrstr; 6352 6353 /* Allocate memory for private data */ 6354 sc->sc_cookie = malloc(sizeof(*ps), M_DEVBUF, M_NOWAIT | M_ZERO); 6355 ps = sc->sc_cookie; 6356 6357 /* 6358 * Promise SATA controllers have 3 or 4 channels, 6359 * the usual IDE registers are mapped in I/O space, with offsets. 6360 */ 6361 if (pci_intr_map(pa, &intrhandle) != 0) { 6362 printf(": couldn't map interrupt\n"); 6363 return; 6364 } 6365 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 6366 6367 switch (sc->sc_pp->ide_product) { 6368 case PCI_PRODUCT_PROMISE_PDC20318: 6369 case PCI_PRODUCT_PROMISE_PDC20319: 6370 case PCI_PRODUCT_PROMISE_PDC20371: 6371 case PCI_PRODUCT_PROMISE_PDC20375: 6372 case PCI_PRODUCT_PROMISE_PDC20376: 6373 case PCI_PRODUCT_PROMISE_PDC20377: 6374 case PCI_PRODUCT_PROMISE_PDC20378: 6375 case PCI_PRODUCT_PROMISE_PDC20379: 6376 default: 6377 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6378 intrhandle, IPL_BIO, pdc203xx_pci_intr, sc, 6379 sc->sc_wdcdev.sc_dev.dv_xname); 6380 break; 6381 6382 case PCI_PRODUCT_PROMISE_PDC40518: 6383 case PCI_PRODUCT_PROMISE_PDC40519: 6384 case PCI_PRODUCT_PROMISE_PDC40718: 6385 case PCI_PRODUCT_PROMISE_PDC40719: 6386 case PCI_PRODUCT_PROMISE_PDC40779: 6387 case PCI_PRODUCT_PROMISE_PDC20571: 6388 case PCI_PRODUCT_PROMISE_PDC20575: 6389 case PCI_PRODUCT_PROMISE_PDC20579: 6390 case PCI_PRODUCT_PROMISE_PDC20771: 6391 case PCI_PRODUCT_PROMISE_PDC20775: 6392 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6393 intrhandle, IPL_BIO, pdc205xx_pci_intr, sc, 6394 sc->sc_wdcdev.sc_dev.dv_xname); 6395 break; 6396 } 6397 6398 if (sc->sc_pci_ih == NULL) { 6399 printf(": couldn't establish native-PCI interrupt"); 6400 if (intrstr != NULL) 6401 printf(" at %s", intrstr); 6402 printf("\n"); 6403 return; 6404 } 6405 6406 sc->sc_dma_ok = (pci_mapreg_map(pa, PCIIDE_REG_BUS_MASTER_DMA, 6407 PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_dma_iot, 6408 &sc->sc_dma_ioh, NULL, &dmasize, 0) == 0); 6409 if (!sc->sc_dma_ok) { 6410 printf(": couldn't map bus-master DMA registers\n"); 6411 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6412 return; 6413 } 6414 6415 sc->sc_dmat = pa->pa_dmat; 6416 6417 if (pci_mapreg_map(pa, PDC203xx_BAR_IDEREGS, 6418 PCI_MAPREG_MEM_TYPE_32BIT, 0, &ps->ba5_st, 6419 &ps->ba5_sh, NULL, NULL, 0) != 0) { 6420 printf(": couldn't map IDE registers\n"); 6421 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, dmasize); 6422 pci_intr_disestablish(pa->pa_pc, 
sc->sc_pci_ih); 6423 return; 6424 } 6425 6426 printf(": DMA\n"); 6427 6428 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 6429 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6430 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6431 sc->sc_wdcdev.irqack = pdc203xx_irqack; 6432 sc->sc_wdcdev.PIO_cap = 4; 6433 sc->sc_wdcdev.DMA_cap = 2; 6434 sc->sc_wdcdev.UDMA_cap = 6; 6435 sc->sc_wdcdev.set_modes = pdc203xx_setup_channel; 6436 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6437 6438 switch (sc->sc_pp->ide_product) { 6439 case PCI_PRODUCT_PROMISE_PDC20318: 6440 case PCI_PRODUCT_PROMISE_PDC20319: 6441 case PCI_PRODUCT_PROMISE_PDC20371: 6442 case PCI_PRODUCT_PROMISE_PDC20375: 6443 case PCI_PRODUCT_PROMISE_PDC20376: 6444 case PCI_PRODUCT_PROMISE_PDC20377: 6445 case PCI_PRODUCT_PROMISE_PDC20378: 6446 case PCI_PRODUCT_PROMISE_PDC20379: 6447 default: 6448 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x06c, 0x00ff0033); 6449 sc->sc_wdcdev.nchannels = 6450 (bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x48) & 0x02) ? 6451 PDC203xx_NCHANNELS : 3; 6452 break; 6453 6454 case PCI_PRODUCT_PROMISE_PDC40518: 6455 case PCI_PRODUCT_PROMISE_PDC40519: 6456 case PCI_PRODUCT_PROMISE_PDC40718: 6457 case PCI_PRODUCT_PROMISE_PDC40719: 6458 case PCI_PRODUCT_PROMISE_PDC40779: 6459 case PCI_PRODUCT_PROMISE_PDC20571: 6460 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6461 sc->sc_wdcdev.nchannels = PDC40718_NCHANNELS; 6462 6463 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6464 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6465 6466 break; 6467 case PCI_PRODUCT_PROMISE_PDC20575: 6468 case PCI_PRODUCT_PROMISE_PDC20579: 6469 case PCI_PRODUCT_PROMISE_PDC20771: 6470 case PCI_PRODUCT_PROMISE_PDC20775: 6471 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6472 sc->sc_wdcdev.nchannels = PDC20575_NCHANNELS; 6473 6474 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6475 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6476 6477 break; 6478 } 6479 6480 sc->sc_wdcdev.dma_arg = sc; 6481 sc->sc_wdcdev.dma_init = pciide_dma_init; 6482 sc->sc_wdcdev.dma_start = pdc203xx_dma_start; 6483 sc->sc_wdcdev.dma_finish = pdc203xx_dma_finish; 6484 6485 for (channel = 0; channel < sc->sc_wdcdev.nchannels; 6486 channel++) { 6487 cp = &sc->pciide_channels[channel]; 6488 sc->wdc_chanarray[channel] = &cp->wdc_channel; 6489 6490 cp->ih = sc->sc_pci_ih; 6491 cp->name = NULL; 6492 cp->wdc_channel.channel = channel; 6493 cp->wdc_channel.wdc = &sc->sc_wdcdev; 6494 cp->wdc_channel.ch_queue = 6495 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 6496 if (cp->wdc_channel.ch_queue == NULL) { 6497 printf("%s: channel %d: " 6498 "can't allocate memory for command queue\n", 6499 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6500 continue; 6501 } 6502 wdc_cp = &cp->wdc_channel; 6503 6504 ps->regs[channel].ctl_iot = ps->ba5_st; 6505 ps->regs[channel].cmd_iot = ps->ba5_st; 6506 6507 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6508 0x0238 + (channel << 7), 1, 6509 &ps->regs[channel].ctl_ioh) != 0) { 6510 printf("%s: couldn't map channel %d ctl regs\n", 6511 sc->sc_wdcdev.sc_dev.dv_xname, 6512 channel); 6513 continue; 6514 } 6515 for (i = 0; i < WDC_NREG; i++) { 6516 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6517 0x0200 + (i << 2) + (channel << 7), i == 0 ? 
			    4 : 1,
			    &ps->regs[channel].cmd_iohs[i]) != 0) {
				printf("%s: couldn't map channel %d cmd "
				    "regs\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel);
				continue;
			}
		}
		ps->regs[channel].cmd_iohs[wdr_status & _WDC_REGMASK] =
		    ps->regs[channel].cmd_iohs[wdr_command & _WDC_REGMASK];
		ps->regs[channel].cmd_iohs[wdr_features & _WDC_REGMASK] =
		    ps->regs[channel].cmd_iohs[wdr_error & _WDC_REGMASK];
		wdc_cp->data32iot = wdc_cp->cmd_iot =
		    ps->regs[channel].cmd_iot;
		wdc_cp->data32ioh = wdc_cp->cmd_ioh =
		    ps->regs[channel].cmd_iohs[0];
		wdc_cp->_vtbl = &wdc_pdc203xx_vtbl;

		/*
		 * Subregion the bus-master registers. They're spread all
		 * over the controller's register space :(. They are also
		 * 4 bytes wide, with some specific extensions in the extra
		 * bits. It also seems that the IDEDMA_CTL register isn't
		 * available.
		 */
		if (bus_space_subregion(ps->ba5_st, ps->ba5_sh,
		    0x260 + (channel << 7), 1,
		    &ps->regs[channel].dma_iohs[IDEDMA_CMD(0)]) != 0) {
			printf("%s channel %d: can't subregion DMA "
			    "registers\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, channel);
			continue;
		}
		if (bus_space_subregion(ps->ba5_st, ps->ba5_sh,
		    0x244 + (channel << 7), 4,
		    &ps->regs[channel].dma_iohs[IDEDMA_TBL(0)]) != 0) {
			printf("%s channel %d: can't subregion DMA "
			    "registers\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, channel);
			continue;
		}

		wdcattach(wdc_cp);
		bus_space_write_4(sc->sc_dma_iot,
		    ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0,
		    (bus_space_read_4(sc->sc_dma_iot,
		    ps->regs[channel].dma_iohs[IDEDMA_CMD(0)],
		    0) & ~0x00003f9f) | (channel + 1));
		bus_space_write_4(ps->ba5_st, ps->ba5_sh,
		    (channel + 1) << 2, 0x00000001);

		pdc203xx_setup_channel(&cp->wdc_channel);
	}

	printf("%s: using %s for native-PCI interrupt\n",
	    sc->sc_wdcdev.sc_dev.dv_xname,
	    intrstr ?
intrstr : "unknown interrupt"); 6574 } 6575 6576 void 6577 pdc203xx_setup_channel(struct channel_softc *chp) 6578 { 6579 struct ata_drive_datas *drvp; 6580 struct pciide_channel *cp = (struct pciide_channel *)chp; 6581 int drive, s; 6582 6583 pciide_channel_dma_setup(cp); 6584 6585 for (drive = 0; drive < 2; drive++) { 6586 drvp = &chp->ch_drive[drive]; 6587 if ((drvp->drive_flags & DRIVE) == 0) 6588 continue; 6589 if (drvp->drive_flags & DRIVE_UDMA) { 6590 s = splbio(); 6591 drvp->drive_flags &= ~DRIVE_DMA; 6592 splx(s); 6593 } 6594 } 6595 pciide_print_modes(cp); 6596 } 6597 6598 int 6599 pdc203xx_pci_intr(void *arg) 6600 { 6601 struct pciide_softc *sc = arg; 6602 struct pciide_channel *cp; 6603 struct channel_softc *wdc_cp; 6604 struct pciide_pdcsata *ps = sc->sc_cookie; 6605 int i, rv, crv; 6606 u_int32_t scr; 6607 6608 rv = 0; 6609 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x00040); 6610 6611 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6612 cp = &sc->pciide_channels[i]; 6613 wdc_cp = &cp->wdc_channel; 6614 if (scr & (1 << (i + 1))) { 6615 crv = wdcintr(wdc_cp); 6616 if (crv == 0) { 6617 printf("%s:%d: bogus intr (reg 0x%x)\n", 6618 sc->sc_wdcdev.sc_dev.dv_xname, 6619 i, scr); 6620 } else 6621 rv = 1; 6622 } 6623 } 6624 6625 return (rv); 6626 } 6627 6628 int 6629 pdc205xx_pci_intr(void *arg) 6630 { 6631 struct pciide_softc *sc = arg; 6632 struct pciide_channel *cp; 6633 struct channel_softc *wdc_cp; 6634 struct pciide_pdcsata *ps = sc->sc_cookie; 6635 int i, rv, crv; 6636 u_int32_t scr, status; 6637 6638 rv = 0; 6639 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x40); 6640 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x40, scr & 0x0000ffff); 6641 6642 status = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x60); 6643 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, status & 0x000000ff); 6644 6645 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6646 cp = &sc->pciide_channels[i]; 6647 wdc_cp = &cp->wdc_channel; 6648 if (scr & (1 << (i + 1))) { 6649 crv = wdcintr(wdc_cp); 6650 if (crv == 0) { 6651 printf("%s:%d: bogus intr (reg 0x%x)\n", 6652 sc->sc_wdcdev.sc_dev.dv_xname, 6653 i, scr); 6654 } else 6655 rv = 1; 6656 } 6657 } 6658 return rv; 6659 } 6660 6661 void 6662 pdc203xx_irqack(struct channel_softc *chp) 6663 { 6664 struct pciide_channel *cp = (struct pciide_channel *)chp; 6665 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6666 struct pciide_pdcsata *ps = sc->sc_cookie; 6667 int chan = chp->channel; 6668 6669 bus_space_write_4(sc->sc_dma_iot, 6670 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, 6671 (bus_space_read_4(sc->sc_dma_iot, 6672 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 6673 0) & ~0x00003f9f) | (chan + 1)); 6674 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 6675 (chan + 1) << 2, 0x00000001); 6676 } 6677 6678 void 6679 pdc203xx_dma_start(void *v, int channel, int drive) 6680 { 6681 struct pciide_softc *sc = v; 6682 struct pciide_channel *cp = &sc->pciide_channels[channel]; 6683 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 6684 struct pciide_pdcsata *ps = sc->sc_cookie; 6685 6686 /* Write table address */ 6687 bus_space_write_4(sc->sc_dma_iot, 6688 ps->regs[channel].dma_iohs[IDEDMA_TBL(0)], 0, 6689 dma_maps->dmamap_table->dm_segs[0].ds_addr); 6690 6691 /* Start DMA engine */ 6692 bus_space_write_4(sc->sc_dma_iot, 6693 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6694 (bus_space_read_4(sc->sc_dma_iot, 6695 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6696 0) & ~0xc0) | ((dma_maps->dma_flags & WDC_DMA_READ) ? 
0x80 : 0xc0)); 6697 } 6698 6699 int 6700 pdc203xx_dma_finish(void *v, int channel, int drive, int force) 6701 { 6702 struct pciide_softc *sc = v; 6703 struct pciide_channel *cp = &sc->pciide_channels[channel]; 6704 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 6705 struct pciide_pdcsata *ps = sc->sc_cookie; 6706 6707 /* Stop DMA channel */ 6708 bus_space_write_4(sc->sc_dma_iot, 6709 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6710 (bus_space_read_4(sc->sc_dma_iot, 6711 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6712 0) & ~0x80)); 6713 6714 /* Unload the map of the data buffer */ 6715 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 6716 dma_maps->dmamap_xfer->dm_mapsize, 6717 (dma_maps->dma_flags & WDC_DMA_READ) ? 6718 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 6719 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 6720 6721 return (0); 6722 } 6723 6724 u_int8_t 6725 pdc203xx_read_reg(struct channel_softc *chp, enum wdc_regs reg) 6726 { 6727 struct pciide_channel *cp = (struct pciide_channel *)chp; 6728 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6729 struct pciide_pdcsata *ps = sc->sc_cookie; 6730 u_int8_t val; 6731 6732 if (reg & _WDC_AUX) { 6733 return (bus_space_read_1(ps->regs[chp->channel].ctl_iot, 6734 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 6735 } else { 6736 val = bus_space_read_1(ps->regs[chp->channel].cmd_iot, 6737 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0); 6738 return (val); 6739 } 6740 } 6741 6742 void 6743 pdc203xx_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 6744 { 6745 struct pciide_channel *cp = (struct pciide_channel *)chp; 6746 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6747 struct pciide_pdcsata *ps = sc->sc_cookie; 6748 6749 if (reg & _WDC_AUX) 6750 bus_space_write_1(ps->regs[chp->channel].ctl_iot, 6751 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 6752 else 6753 bus_space_write_1(ps->regs[chp->channel].cmd_iot, 6754 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 6755 0, val); 6756 } 6757 6758 void 6759 pdc205xx_do_reset(struct channel_softc *chp) 6760 { 6761 struct pciide_channel *cp = (struct pciide_channel *)chp; 6762 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6763 struct pciide_pdcsata *ps = sc->sc_cookie; 6764 u_int32_t scontrol; 6765 6766 wdc_do_reset(chp); 6767 6768 /* reset SATA */ 6769 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 6770 SCONTROL_WRITE(ps, chp->channel, scontrol); 6771 delay(50*1000); 6772 6773 scontrol &= ~SControl_DET_INIT; 6774 SCONTROL_WRITE(ps, chp->channel, scontrol); 6775 delay(50*1000); 6776 } 6777 6778 void 6779 pdc205xx_drv_probe(struct channel_softc *chp) 6780 { 6781 struct pciide_channel *cp = (struct pciide_channel *)chp; 6782 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6783 struct pciide_pdcsata *ps = sc->sc_cookie; 6784 bus_space_handle_t *iohs; 6785 u_int32_t scontrol, sstatus; 6786 u_int16_t scnt, sn, cl, ch; 6787 int i, s; 6788 6789 /* XXX This should be done by other code. 
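	 * (What it does here: the loop below just records each drive's
	 * channel back-pointer and drive index so the probe that follows
	 * can use them.)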
*/ 6790 for (i = 0; i < 2; i++) { 6791 chp->ch_drive[i].chnl_softc = chp; 6792 chp->ch_drive[i].drive = i; 6793 } 6794 6795 SCONTROL_WRITE(ps, chp->channel, 0); 6796 delay(50*1000); 6797 6798 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 6799 SCONTROL_WRITE(ps,chp->channel,scontrol); 6800 delay(50*1000); 6801 6802 scontrol &= ~SControl_DET_INIT; 6803 SCONTROL_WRITE(ps,chp->channel,scontrol); 6804 delay(50*1000); 6805 6806 sstatus = SSTATUS_READ(ps,chp->channel); 6807 6808 switch (sstatus & SStatus_DET_mask) { 6809 case SStatus_DET_NODEV: 6810 /* No Device; be silent. */ 6811 break; 6812 6813 case SStatus_DET_DEV_NE: 6814 printf("%s: port %d: device connected, but " 6815 "communication not established\n", 6816 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6817 break; 6818 6819 case SStatus_DET_OFFLINE: 6820 printf("%s: port %d: PHY offline\n", 6821 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6822 break; 6823 6824 case SStatus_DET_DEV: 6825 iohs = ps->regs[chp->channel].cmd_iohs; 6826 bus_space_write_1(chp->cmd_iot, iohs[wdr_sdh], 0, 6827 WDSD_IBM); 6828 delay(10); /* 400ns delay */ 6829 scnt = bus_space_read_2(chp->cmd_iot, iohs[wdr_seccnt], 0); 6830 sn = bus_space_read_2(chp->cmd_iot, iohs[wdr_sector], 0); 6831 cl = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_lo], 0); 6832 ch = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_hi], 0); 6833 #if 0 6834 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 6835 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 6836 scnt, sn, cl, ch); 6837 #endif 6838 /* 6839 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 6840 * cases we get wrong values here, so ignore it. 6841 */ 6842 s = splbio(); 6843 if (cl == 0x14 && ch == 0xeb) 6844 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 6845 else 6846 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 6847 splx(s); 6848 #if 0 6849 printf("%s: port %d: device present", 6850 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6851 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 6852 case 1: 6853 printf(", speed: 1.5Gb/s"); 6854 break; 6855 case 2: 6856 printf(", speed: 3.0Gb/s"); 6857 break; 6858 } 6859 printf("\n"); 6860 #endif 6861 break; 6862 6863 default: 6864 printf("%s: port %d: unknown SStatus: 0x%08x\n", 6865 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 6866 } 6867 } 6868 6869 #ifdef notyet 6870 /* 6871 * Inline functions for accessing the timing registers of the 6872 * OPTi controller. 6873 * 6874 * These *MUST* disable interrupts as they need atomic access to 6875 * certain magic registers. Failure to adhere to this *will* 6876 * break things in subtle ways if the wdc registers are accessed 6877 * by an interrupt routine while this magic sequence is executing. 
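 * (Concretely, the helpers below perform two 16-bit reads of the
 * features register, write 0x03 to the sector count register to switch
 * in the extra configuration registers, do the access, and then write
 * 0x83 to restore the normal register file.)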
6878 */ 6879 static __inline__ u_int8_t 6880 opti_read_config(struct channel_softc *chp, int reg) 6881 { 6882 u_int8_t rv; 6883 int s = splhigh(); 6884 6885 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 6886 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 6887 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 6888 6889 /* Followed by an 8-bit write of 0x3 to register #2 */ 6890 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 6891 6892 /* Now we can read the required register */ 6893 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 6894 6895 /* Restore the real registers */ 6896 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 6897 6898 splx(s); 6899 6900 return (rv); 6901 } 6902 6903 static __inline__ void 6904 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 6905 { 6906 int s = splhigh(); 6907 6908 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 6909 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 6910 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 6911 6912 /* Followed by an 8-bit write of 0x3 to register #2 */ 6913 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 6914 6915 /* Now we can write the required register */ 6916 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 6917 6918 /* Restore the real registers */ 6919 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 6920 6921 splx(s); 6922 } 6923 6924 void 6925 opti_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6926 { 6927 struct pciide_channel *cp; 6928 bus_size_t cmdsize, ctlsize; 6929 pcireg_t interface; 6930 u_int8_t init_ctrl; 6931 int channel; 6932 6933 printf(": DMA"); 6934 /* 6935 * XXXSCW: 6936 * There seem to be a couple of buggy revisions/implementations 6937 * of the OPTi pciide chipset. This kludge seems to fix one of 6938 * the reported problems (NetBSD PR/11644) but still fails for the 6939 * other (NetBSD PR/13151), although the latter may be due to other 6940 * issues too... 
6941 */ 6942 if (sc->sc_rev <= 0x12) { 6943 printf(" (disabled)"); 6944 sc->sc_dma_ok = 0; 6945 sc->sc_wdcdev.cap = 0; 6946 } else { 6947 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32; 6948 pciide_mapreg_dma(sc, pa); 6949 } 6950 6951 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE; 6952 sc->sc_wdcdev.PIO_cap = 4; 6953 if (sc->sc_dma_ok) { 6954 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 6955 sc->sc_wdcdev.irqack = pciide_irqack; 6956 sc->sc_wdcdev.DMA_cap = 2; 6957 } 6958 sc->sc_wdcdev.set_modes = opti_setup_channel; 6959 6960 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6961 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 6962 6963 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 6964 OPTI_REG_INIT_CONTROL); 6965 6966 interface = PCI_INTERFACE(pa->pa_class); 6967 6968 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 6969 6970 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6971 cp = &sc->pciide_channels[channel]; 6972 if (pciide_chansetup(sc, channel, interface) == 0) 6973 continue; 6974 if (channel == 1 && 6975 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 6976 printf("%s: %s ignored (disabled)\n", 6977 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6978 continue; 6979 } 6980 pciide_map_compat_intr(pa, cp, channel, interface); 6981 if (cp->hw_ok == 0) 6982 continue; 6983 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6984 pciide_pci_intr); 6985 if (cp->hw_ok == 0) { 6986 pciide_unmap_compat_intr(pa, cp, channel, interface); 6987 continue; 6988 } 6989 opti_setup_channel(&cp->wdc_channel); 6990 } 6991 } 6992 6993 void 6994 opti_setup_channel(struct channel_softc *chp) 6995 { 6996 struct ata_drive_datas *drvp; 6997 struct pciide_channel *cp = (struct pciide_channel *)chp; 6998 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6999 int drive, spd; 7000 int mode[2]; 7001 u_int8_t rv, mr; 7002 7003 /* 7004 * The `Delay' and `Address Setup Time' fields of the 7005 * Miscellaneous Register are always zero initially. 7006 */ 7007 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 7008 mr &= ~(OPTI_MISC_DELAY_MASK | 7009 OPTI_MISC_ADDR_SETUP_MASK | 7010 OPTI_MISC_INDEX_MASK); 7011 7012 /* Prime the control register before setting timing values */ 7013 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 7014 7015 /* Determine the clockrate of the PCIbus the chip is attached to */ 7016 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 7017 spd &= OPTI_STRAP_PCI_SPEED_MASK; 7018 7019 /* setup DMA if needed */ 7020 pciide_channel_dma_setup(cp); 7021 7022 for (drive = 0; drive < 2; drive++) { 7023 drvp = &chp->ch_drive[drive]; 7024 /* If no drive, skip */ 7025 if ((drvp->drive_flags & DRIVE) == 0) { 7026 mode[drive] = -1; 7027 continue; 7028 } 7029 7030 if ((drvp->drive_flags & DRIVE_DMA)) { 7031 /* 7032 * Timings will be used for both PIO and DMA, 7033 * so adjust DMA mode if needed 7034 */ 7035 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 7036 drvp->PIO_mode = drvp->DMA_mode + 2; 7037 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 7038 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 7039 drvp->PIO_mode - 2 : 0; 7040 if (drvp->DMA_mode == 0) 7041 drvp->PIO_mode = 0; 7042 7043 mode[drive] = drvp->DMA_mode + 5; 7044 } else 7045 mode[drive] = drvp->PIO_mode; 7046 7047 if (drive && mode[0] >= 0 && 7048 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 7049 /* 7050 * Can't have two drives using different values 7051 * for `Address Setup Time'. 7052 * Slow down the faster drive to compensate. 
7053 */ 7054 int d = (opti_tim_as[spd][mode[0]] > 7055 opti_tim_as[spd][mode[1]]) ? 0 : 1; 7056 7057 mode[d] = mode[1-d]; 7058 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 7059 chp->ch_drive[d].DMA_mode = 0; 7060 chp->ch_drive[d].drive_flags &= DRIVE_DMA; 7061 } 7062 } 7063 7064 for (drive = 0; drive < 2; drive++) { 7065 int m; 7066 if ((m = mode[drive]) < 0) 7067 continue; 7068 7069 /* Set the Address Setup Time and select appropriate index */ 7070 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 7071 rv |= OPTI_MISC_INDEX(drive); 7072 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 7073 7074 /* Set the pulse width and recovery timing parameters */ 7075 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 7076 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 7077 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 7078 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 7079 7080 /* Set the Enhanced Mode register appropriately */ 7081 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 7082 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 7083 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 7084 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 7085 } 7086 7087 /* Finally, enable the timings */ 7088 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 7089 7090 pciide_print_modes(cp); 7091 } 7092 #endif 7093 7094 void 7095 serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7096 { 7097 struct pciide_channel *cp; 7098 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 7099 pcitag_t pcib_tag; 7100 int channel; 7101 bus_size_t cmdsize, ctlsize; 7102 7103 printf(": DMA"); 7104 pciide_mapreg_dma(sc, pa); 7105 printf("\n"); 7106 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7107 WDC_CAPABILITY_MODE; 7108 7109 if (sc->sc_dma_ok) { 7110 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7111 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7112 sc->sc_wdcdev.irqack = pciide_irqack; 7113 } 7114 sc->sc_wdcdev.PIO_cap = 4; 7115 sc->sc_wdcdev.DMA_cap = 2; 7116 switch (sc->sc_pp->ide_product) { 7117 case PCI_PRODUCT_RCC_OSB4_IDE: 7118 sc->sc_wdcdev.UDMA_cap = 2; 7119 break; 7120 case PCI_PRODUCT_RCC_CSB5_IDE: 7121 if (sc->sc_rev < 0x92) 7122 sc->sc_wdcdev.UDMA_cap = 4; 7123 else 7124 sc->sc_wdcdev.UDMA_cap = 5; 7125 break; 7126 case PCI_PRODUCT_RCC_CSB6_IDE: 7127 sc->sc_wdcdev.UDMA_cap = 4; 7128 break; 7129 case PCI_PRODUCT_RCC_CSB6_RAID_IDE: 7130 sc->sc_wdcdev.UDMA_cap = 5; 7131 break; 7132 } 7133 7134 sc->sc_wdcdev.set_modes = serverworks_setup_channel; 7135 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7136 sc->sc_wdcdev.nchannels = 7137 (sc->sc_pp->ide_product == PCI_PRODUCT_RCC_CSB6_IDE ? 
1 : 2); 7138 7139 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7140 cp = &sc->pciide_channels[channel]; 7141 if (pciide_chansetup(sc, channel, interface) == 0) 7142 continue; 7143 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7144 serverworks_pci_intr); 7145 if (cp->hw_ok == 0) 7146 return; 7147 pciide_map_compat_intr(pa, cp, channel, interface); 7148 if (cp->hw_ok == 0) 7149 return; 7150 serverworks_setup_channel(&cp->wdc_channel); 7151 } 7152 7153 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 7154 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 7155 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 7156 } 7157 7158 void 7159 serverworks_setup_channel(struct channel_softc *chp) 7160 { 7161 struct ata_drive_datas *drvp; 7162 struct pciide_channel *cp = (struct pciide_channel *)chp; 7163 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7164 int channel = chp->channel; 7165 int drive, unit; 7166 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 7167 u_int32_t idedma_ctl; 7168 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 7169 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 7170 7171 /* setup DMA if needed */ 7172 pciide_channel_dma_setup(cp); 7173 7174 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 7175 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 7176 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 7177 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 7178 7179 pio_time &= ~(0xffff << (16 * channel)); 7180 dma_time &= ~(0xffff << (16 * channel)); 7181 pio_mode &= ~(0xff << (8 * channel + 16)); 7182 udma_mode &= ~(0xff << (8 * channel + 16)); 7183 udma_mode &= ~(3 << (2 * channel)); 7184 7185 idedma_ctl = 0; 7186 7187 /* Per drive settings */ 7188 for (drive = 0; drive < 2; drive++) { 7189 drvp = &chp->ch_drive[drive]; 7190 /* If no drive, skip */ 7191 if ((drvp->drive_flags & DRIVE) == 0) 7192 continue; 7193 unit = drive + 2 * channel; 7194 /* add timing values, setup DMA if needed */ 7195 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 7196 pio_mode |= drvp->PIO_mode << (4 * unit + 16); 7197 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7198 (drvp->drive_flags & DRIVE_UDMA)) { 7199 /* use Ultra/DMA, check for 80-pin cable */ 7200 if (sc->sc_rev <= 0x92 && drvp->UDMA_mode > 2 && 7201 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 7202 PCI_SUBSYS_ID_REG)) & 7203 (1 << (14 + channel))) == 0) { 7204 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 7205 "cable not detected\n", drvp->drive_name, 7206 sc->sc_wdcdev.sc_dev.dv_xname, 7207 channel, drive), DEBUG_PROBE); 7208 drvp->UDMA_mode = 2; 7209 } 7210 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7211 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 7212 udma_mode |= 1 << unit; 7213 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7214 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7215 (drvp->drive_flags & DRIVE_DMA)) { 7216 /* use Multiword DMA */ 7217 drvp->drive_flags &= ~DRIVE_UDMA; 7218 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7219 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7220 } else { 7221 /* PIO only */ 7222 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7223 } 7224 } 7225 7226 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 7227 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 7228 if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE) 7229 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 7230 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 
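	/*
	 * Note on the register layout implied by the masks and shifts above
	 * (an inference from this code, not from chip documentation): 0x40
	 * and 0x44 appear to hold one byte of PIO/MW-DMA cycle timing per
	 * drive (16 bits per channel, the unit^1 swizzle putting drive 0 in
	 * the upper byte), while 0x48 and 0x54 hold per-drive mode nibbles
	 * and the low bits of 0x54 act as per-unit UDMA enables.  The OSB4
	 * presumably lacks the PIO mode register at 0x48, hence the product
	 * check guarding that write.
	 */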
7231 7232 if (idedma_ctl != 0) { 7233 /* Add software bits in status register */ 7234 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7235 IDEDMA_CTL(channel), idedma_ctl); 7236 } 7237 pciide_print_modes(cp); 7238 } 7239 7240 int 7241 serverworks_pci_intr(void *arg) 7242 { 7243 struct pciide_softc *sc = arg; 7244 struct pciide_channel *cp; 7245 struct channel_softc *wdc_cp; 7246 int rv = 0; 7247 int dmastat, i, crv; 7248 7249 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7250 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7251 IDEDMA_CTL(i)); 7252 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 7253 IDEDMA_CTL_INTR) 7254 continue; 7255 cp = &sc->pciide_channels[i]; 7256 wdc_cp = &cp->wdc_channel; 7257 crv = wdcintr(wdc_cp); 7258 if (crv == 0) { 7259 printf("%s:%d: bogus intr\n", 7260 sc->sc_wdcdev.sc_dev.dv_xname, i); 7261 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7262 IDEDMA_CTL(i), dmastat); 7263 } else 7264 rv = 1; 7265 } 7266 return (rv); 7267 } 7268 7269 void 7270 svwsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7271 { 7272 struct pciide_channel *cp; 7273 pci_intr_handle_t intrhandle; 7274 const char *intrstr; 7275 int channel; 7276 struct pciide_svwsata *ss; 7277 7278 /* Allocate memory for private data */ 7279 sc->sc_cookie = malloc(sizeof(*ss), M_DEVBUF, M_NOWAIT | M_ZERO); 7280 ss = sc->sc_cookie; 7281 7282 /* The 4-port version has a dummy second function. */ 7283 if (pci_conf_read(sc->sc_pc, sc->sc_tag, 7284 PCI_MAPREG_START + 0x14) == 0) { 7285 printf("\n"); 7286 return; 7287 } 7288 7289 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 7290 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 7291 &ss->ba5_st, &ss->ba5_sh, NULL, NULL, 0) != 0) { 7292 printf(": unable to map BA5 register space\n"); 7293 return; 7294 } 7295 7296 printf(": DMA"); 7297 svwsata_mapreg_dma(sc, pa); 7298 printf("\n"); 7299 7300 if (sc->sc_dma_ok) { 7301 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 7302 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7303 sc->sc_wdcdev.irqack = pciide_irqack; 7304 } 7305 sc->sc_wdcdev.PIO_cap = 4; 7306 sc->sc_wdcdev.DMA_cap = 2; 7307 sc->sc_wdcdev.UDMA_cap = 6; 7308 7309 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7310 sc->sc_wdcdev.nchannels = 4; 7311 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7312 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7313 sc->sc_wdcdev.set_modes = sata_setup_channel; 7314 7315 /* We can use SControl and SStatus to probe for drives. */ 7316 sc->sc_wdcdev.drv_probe = svwsata_drv_probe; 7317 7318 /* Map and establish the interrupt handler. */ 7319 if(pci_intr_map(pa, &intrhandle) != 0) { 7320 printf("%s: couldn't map native-PCI interrupt\n", 7321 sc->sc_wdcdev.sc_dev.dv_xname); 7322 return; 7323 } 7324 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 7325 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 7326 pciide_pci_intr, sc, sc->sc_wdcdev.sc_dev.dv_xname); 7327 if (sc->sc_pci_ih != NULL) { 7328 printf("%s: using %s for native-PCI interrupt\n", 7329 sc->sc_wdcdev.sc_dev.dv_xname, 7330 intrstr ? 
intrstr : "unknown interrupt"); 7331 } else { 7332 printf("%s: couldn't establish native-PCI interrupt", 7333 sc->sc_wdcdev.sc_dev.dv_xname); 7334 if (intrstr != NULL) 7335 printf(" at %s", intrstr); 7336 printf("\n"); 7337 return; 7338 } 7339 7340 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7341 cp = &sc->pciide_channels[channel]; 7342 if (pciide_chansetup(sc, channel, 0) == 0) 7343 continue; 7344 svwsata_mapchan(cp); 7345 sata_setup_channel(&cp->wdc_channel); 7346 } 7347 } 7348 7349 void 7350 svwsata_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 7351 { 7352 struct pciide_svwsata *ss = sc->sc_cookie; 7353 7354 sc->sc_wdcdev.dma_arg = sc; 7355 sc->sc_wdcdev.dma_init = pciide_dma_init; 7356 sc->sc_wdcdev.dma_start = pciide_dma_start; 7357 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 7358 7359 /* XXX */ 7360 sc->sc_dma_iot = ss->ba5_st; 7361 sc->sc_dma_ioh = ss->ba5_sh; 7362 7363 sc->sc_dmacmd_read = svwsata_dmacmd_read; 7364 sc->sc_dmacmd_write = svwsata_dmacmd_write; 7365 sc->sc_dmactl_read = svwsata_dmactl_read; 7366 sc->sc_dmactl_write = svwsata_dmactl_write; 7367 sc->sc_dmatbl_write = svwsata_dmatbl_write; 7368 7369 /* DMA registers all set up! */ 7370 sc->sc_dmat = pa->pa_dmat; 7371 sc->sc_dma_ok = 1; 7372 } 7373 7374 u_int8_t 7375 svwsata_dmacmd_read(struct pciide_softc *sc, int chan) 7376 { 7377 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7378 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0))); 7379 } 7380 7381 void 7382 svwsata_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 7383 { 7384 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7385 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0), val); 7386 } 7387 7388 u_int8_t 7389 svwsata_dmactl_read(struct pciide_softc *sc, int chan) 7390 { 7391 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7392 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0))); 7393 } 7394 7395 void 7396 svwsata_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 7397 { 7398 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7399 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0), val); 7400 } 7401 7402 void 7403 svwsata_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 7404 { 7405 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 7406 (chan << 8) + SVWSATA_DMA + IDEDMA_TBL(0), val); 7407 } 7408 7409 void 7410 svwsata_mapchan(struct pciide_channel *cp) 7411 { 7412 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7413 struct channel_softc *wdc_cp = &cp->wdc_channel; 7414 struct pciide_svwsata *ss = sc->sc_cookie; 7415 7416 cp->compat = 0; 7417 cp->ih = sc->sc_pci_ih; 7418 7419 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7420 (wdc_cp->channel << 8) + SVWSATA_TF0, 7421 SVWSATA_TF8 - SVWSATA_TF0, &wdc_cp->cmd_ioh) != 0) { 7422 printf("%s: couldn't map %s cmd regs\n", 7423 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7424 return; 7425 } 7426 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7427 (wdc_cp->channel << 8) + SVWSATA_TF8, 4, 7428 &wdc_cp->ctl_ioh) != 0) { 7429 printf("%s: couldn't map %s ctl regs\n", 7430 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7431 return; 7432 } 7433 wdc_cp->cmd_iot = wdc_cp->ctl_iot = ss->ba5_st; 7434 wdc_cp->_vtbl = &wdc_svwsata_vtbl; 7435 wdcattach(wdc_cp); 7436 } 7437 7438 void 7439 svwsata_drv_probe(struct channel_softc *chp) 7440 { 7441 struct pciide_channel *cp = (struct pciide_channel *)chp; 7442 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7443 struct pciide_svwsata *ss = sc->sc_cookie; 7444 int channel = chp->channel; 
7445 uint32_t scontrol, sstatus; 7446 uint8_t scnt, sn, cl, ch; 7447 int i, s; 7448 7449 /* XXX This should be done by other code. */ 7450 for (i = 0; i < 2; i++) { 7451 chp->ch_drive[i].chnl_softc = chp; 7452 chp->ch_drive[i].drive = i; 7453 } 7454 7455 /* 7456 * Request communication initialization sequence, any speed. 7457 * Performing this is the equivalent of an ATA Reset. 7458 */ 7459 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 7460 7461 /* 7462 * XXX We don't yet support SATA power management; disable all 7463 * power management state transitions. 7464 */ 7465 scontrol |= SControl_IPM_NONE; 7466 7467 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7468 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7469 delay(50 * 1000); 7470 scontrol &= ~SControl_DET_INIT; 7471 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7472 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7473 delay(50 * 1000); 7474 7475 sstatus = bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7476 (channel << 8) + SVWSATA_SSTATUS); 7477 #if 0 7478 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 7479 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 7480 bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7481 (channel << 8) + SVWSATA_SSTATUS)); 7482 #endif 7483 switch (sstatus & SStatus_DET_mask) { 7484 case SStatus_DET_NODEV: 7485 /* No device; be silent. */ 7486 break; 7487 7488 case SStatus_DET_DEV_NE: 7489 printf("%s: port %d: device connected, but " 7490 "communication not established\n", 7491 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7492 break; 7493 7494 case SStatus_DET_OFFLINE: 7495 printf("%s: port %d: PHY offline\n", 7496 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7497 break; 7498 7499 case SStatus_DET_DEV: 7500 /* 7501 * XXX ATAPI detection doesn't currently work. Don't 7502 * XXX know why. But, it's not like the standard method 7503 * XXX can detect an ATAPI device connected via a SATA/PATA 7504 * XXX bridge, so at least this is no worse. --thorpej 7505 */ 7506 if (chp->_vtbl != NULL) 7507 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 7508 else 7509 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 7510 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 7511 delay(10); /* 400ns delay */ 7512 /* Save register contents. */ 7513 if (chp->_vtbl != NULL) { 7514 scnt = CHP_READ_REG(chp, wdr_seccnt); 7515 sn = CHP_READ_REG(chp, wdr_sector); 7516 cl = CHP_READ_REG(chp, wdr_cyl_lo); 7517 ch = CHP_READ_REG(chp, wdr_cyl_hi); 7518 } else { 7519 scnt = bus_space_read_1(chp->cmd_iot, 7520 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 7521 sn = bus_space_read_1(chp->cmd_iot, 7522 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 7523 cl = bus_space_read_1(chp->cmd_iot, 7524 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 7525 ch = bus_space_read_1(chp->cmd_iot, 7526 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 7527 } 7528 #if 0 7529 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7530 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7531 scnt, sn, cl, ch); 7532 #endif 7533 /* 7534 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7535 * cases we get wrong values here, so ignore it. 
7536 */ 7537 s = splbio(); 7538 if (cl == 0x14 && ch == 0xeb) 7539 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7540 else 7541 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7542 splx(s); 7543 7544 printf("%s: port %d: device present", 7545 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7546 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7547 case 1: 7548 printf(", speed: 1.5Gb/s"); 7549 break; 7550 case 2: 7551 printf(", speed: 3.0Gb/s"); 7552 break; 7553 } 7554 printf("\n"); 7555 break; 7556 7557 default: 7558 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7559 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7560 } 7561 } 7562 7563 u_int8_t 7564 svwsata_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7565 { 7566 if (reg & _WDC_AUX) { 7567 return (bus_space_read_4(chp->ctl_iot, chp->ctl_ioh, 7568 (reg & _WDC_REGMASK) << 2)); 7569 } else { 7570 return (bus_space_read_4(chp->cmd_iot, chp->cmd_ioh, 7571 (reg & _WDC_REGMASK) << 2)); 7572 } 7573 } 7574 7575 void 7576 svwsata_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7577 { 7578 if (reg & _WDC_AUX) { 7579 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7580 (reg & _WDC_REGMASK) << 2, val); 7581 } else { 7582 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7583 (reg & _WDC_REGMASK) << 2, val); 7584 } 7585 } 7586 7587 void 7588 svwsata_lba48_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int16_t val) 7589 { 7590 if (reg & _WDC_AUX) { 7591 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7592 (reg & _WDC_REGMASK) << 2, val); 7593 } else { 7594 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7595 (reg & _WDC_REGMASK) << 2, val); 7596 } 7597 } 7598 7599 #define ACARD_IS_850(sc) \ 7600 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 7601 7602 void 7603 acard_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7604 { 7605 struct pciide_channel *cp; 7606 int i; 7607 pcireg_t interface; 7608 bus_size_t cmdsize, ctlsize; 7609 7610 /* 7611 * when the chip is in native mode it identifies itself as a 7612 * 'misc mass storage'. Fake interface in this case. 
7613 */ 7614 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 7615 interface = PCI_INTERFACE(pa->pa_class); 7616 } else { 7617 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 7618 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 7619 } 7620 7621 printf(": DMA"); 7622 pciide_mapreg_dma(sc, pa); 7623 printf("\n"); 7624 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7625 WDC_CAPABILITY_MODE; 7626 7627 if (sc->sc_dma_ok) { 7628 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7629 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7630 sc->sc_wdcdev.irqack = pciide_irqack; 7631 } 7632 sc->sc_wdcdev.PIO_cap = 4; 7633 sc->sc_wdcdev.DMA_cap = 2; 7634 switch (sc->sc_pp->ide_product) { 7635 case PCI_PRODUCT_ACARD_ATP850U: 7636 sc->sc_wdcdev.UDMA_cap = 2; 7637 break; 7638 case PCI_PRODUCT_ACARD_ATP860: 7639 case PCI_PRODUCT_ACARD_ATP860A: 7640 sc->sc_wdcdev.UDMA_cap = 4; 7641 break; 7642 case PCI_PRODUCT_ACARD_ATP865A: 7643 case PCI_PRODUCT_ACARD_ATP865R: 7644 sc->sc_wdcdev.UDMA_cap = 6; 7645 break; 7646 } 7647 7648 sc->sc_wdcdev.set_modes = acard_setup_channel; 7649 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7650 sc->sc_wdcdev.nchannels = 2; 7651 7652 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7653 cp = &sc->pciide_channels[i]; 7654 if (pciide_chansetup(sc, i, interface) == 0) 7655 continue; 7656 if (interface & PCIIDE_INTERFACE_PCI(i)) { 7657 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 7658 &ctlsize, pciide_pci_intr); 7659 } else { 7660 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 7661 &cmdsize, &ctlsize); 7662 } 7663 if (cp->hw_ok == 0) 7664 return; 7665 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 7666 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 7667 wdcattach(&cp->wdc_channel); 7668 acard_setup_channel(&cp->wdc_channel); 7669 } 7670 if (!ACARD_IS_850(sc)) { 7671 u_int32_t reg; 7672 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 7673 reg &= ~ATP860_CTRL_INT; 7674 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 7675 } 7676 } 7677 7678 void 7679 acard_setup_channel(struct channel_softc *chp) 7680 { 7681 struct ata_drive_datas *drvp; 7682 struct pciide_channel *cp = (struct pciide_channel *)chp; 7683 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7684 int channel = chp->channel; 7685 int drive; 7686 u_int32_t idetime, udma_mode; 7687 u_int32_t idedma_ctl; 7688 7689 /* setup DMA if needed */ 7690 pciide_channel_dma_setup(cp); 7691 7692 if (ACARD_IS_850(sc)) { 7693 idetime = 0; 7694 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 7695 udma_mode &= ~ATP850_UDMA_MASK(channel); 7696 } else { 7697 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 7698 idetime &= ~ATP860_SETTIME_MASK(channel); 7699 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 7700 udma_mode &= ~ATP860_UDMA_MASK(channel); 7701 } 7702 7703 idedma_ctl = 0; 7704 7705 /* Per drive settings */ 7706 for (drive = 0; drive < 2; drive++) { 7707 drvp = &chp->ch_drive[drive]; 7708 /* If no drive, skip */ 7709 if ((drvp->drive_flags & DRIVE) == 0) 7710 continue; 7711 /* add timing values, setup DMA if needed */ 7712 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7713 (drvp->drive_flags & DRIVE_UDMA)) { 7714 /* use Ultra/DMA */ 7715 if (ACARD_IS_850(sc)) { 7716 idetime |= ATP850_SETTIME(drive, 7717 acard_act_udma[drvp->UDMA_mode], 7718 acard_rec_udma[drvp->UDMA_mode]); 7719 udma_mode |= ATP850_UDMA_MODE(channel, drive, 7720 acard_udma_conf[drvp->UDMA_mode]); 7721 } else { 7722 idetime |= 
ATP860_SETTIME(channel, drive, 7723 acard_act_udma[drvp->UDMA_mode], 7724 acard_rec_udma[drvp->UDMA_mode]); 7725 udma_mode |= ATP860_UDMA_MODE(channel, drive, 7726 acard_udma_conf[drvp->UDMA_mode]); 7727 } 7728 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7729 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7730 (drvp->drive_flags & DRIVE_DMA)) { 7731 /* use Multiword DMA */ 7732 drvp->drive_flags &= ~DRIVE_UDMA; 7733 if (ACARD_IS_850(sc)) { 7734 idetime |= ATP850_SETTIME(drive, 7735 acard_act_dma[drvp->DMA_mode], 7736 acard_rec_dma[drvp->DMA_mode]); 7737 } else { 7738 idetime |= ATP860_SETTIME(channel, drive, 7739 acard_act_dma[drvp->DMA_mode], 7740 acard_rec_dma[drvp->DMA_mode]); 7741 } 7742 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7743 } else { 7744 /* PIO only */ 7745 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7746 if (ACARD_IS_850(sc)) { 7747 idetime |= ATP850_SETTIME(drive, 7748 acard_act_pio[drvp->PIO_mode], 7749 acard_rec_pio[drvp->PIO_mode]); 7750 } else { 7751 idetime |= ATP860_SETTIME(channel, drive, 7752 acard_act_pio[drvp->PIO_mode], 7753 acard_rec_pio[drvp->PIO_mode]); 7754 } 7755 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 7756 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 7757 | ATP8x0_CTRL_EN(channel)); 7758 } 7759 } 7760 7761 if (idedma_ctl != 0) { 7762 /* Add software bits in status register */ 7763 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7764 IDEDMA_CTL(channel), idedma_ctl); 7765 } 7766 pciide_print_modes(cp); 7767 7768 if (ACARD_IS_850(sc)) { 7769 pci_conf_write(sc->sc_pc, sc->sc_tag, 7770 ATP850_IDETIME(channel), idetime); 7771 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 7772 } else { 7773 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 7774 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 7775 } 7776 } 7777 7778 void 7779 nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7780 { 7781 struct pciide_channel *cp; 7782 int channel; 7783 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 7784 bus_size_t cmdsize, ctlsize; 7785 u_int32_t conf; 7786 7787 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 7788 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 7789 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 7790 7791 printf(": DMA"); 7792 pciide_mapreg_dma(sc, pa); 7793 7794 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7795 WDC_CAPABILITY_MODE; 7796 if (sc->sc_dma_ok) { 7797 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7798 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7799 sc->sc_wdcdev.irqack = pciide_irqack; 7800 } 7801 sc->sc_wdcdev.PIO_cap = 4; 7802 sc->sc_wdcdev.DMA_cap = 2; 7803 switch (sc->sc_pp->ide_product) { 7804 case PCI_PRODUCT_NVIDIA_NFORCE_IDE: 7805 sc->sc_wdcdev.UDMA_cap = 5; 7806 break; 7807 default: 7808 sc->sc_wdcdev.UDMA_cap = 6; 7809 } 7810 sc->sc_wdcdev.set_modes = nforce_setup_channel; 7811 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7812 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 7813 7814 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 7815 7816 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7817 cp = &sc->pciide_channels[channel]; 7818 7819 if (pciide_chansetup(sc, channel, interface) == 0) 7820 continue; 7821 7822 if ((conf & NFORCE_CHAN_EN(channel)) == 0) { 7823 printf("%s: %s ignored (disabled)\n", 7824 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7825 continue; 7826 } 7827 7828 pciide_map_compat_intr(pa, cp, channel, interface); 7829 if (cp->hw_ok == 0) 7830 continue; 7831 
pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7832 nforce_pci_intr); 7833 if (cp->hw_ok == 0) { 7834 pciide_unmap_compat_intr(pa, cp, channel, interface); 7835 continue; 7836 } 7837 7838 if (pciide_chan_candisable(cp)) { 7839 conf &= ~NFORCE_CHAN_EN(channel); 7840 pciide_unmap_compat_intr(pa, cp, channel, interface); 7841 continue; 7842 } 7843 7844 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 7845 } 7846 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 7847 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 7848 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 7849 } 7850 7851 void 7852 nforce_setup_channel(struct channel_softc *chp) 7853 { 7854 struct ata_drive_datas *drvp; 7855 int drive, mode; 7856 u_int32_t idedma_ctl; 7857 struct pciide_channel *cp = (struct pciide_channel *)chp; 7858 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7859 int channel = chp->channel; 7860 u_int32_t conf, piodmatim, piotim, udmatim; 7861 7862 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 7863 piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM); 7864 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM); 7865 udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM); 7866 WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, " 7867 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 7868 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 7869 7870 /* Setup DMA if needed */ 7871 pciide_channel_dma_setup(cp); 7872 7873 /* Clear all bits for this channel */ 7874 idedma_ctl = 0; 7875 piodmatim &= ~NFORCE_PIODMATIM_MASK(channel); 7876 udmatim &= ~NFORCE_UDMATIM_MASK(channel); 7877 7878 /* Per channel settings */ 7879 for (drive = 0; drive < 2; drive++) { 7880 drvp = &chp->ch_drive[drive]; 7881 7882 /* If no drive, skip */ 7883 if ((drvp->drive_flags & DRIVE) == 0) 7884 continue; 7885 7886 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 7887 (drvp->drive_flags & DRIVE_UDMA) != 0) { 7888 /* Setup UltraDMA mode */ 7889 drvp->drive_flags &= ~DRIVE_DMA; 7890 7891 udmatim |= NFORCE_UDMATIM_SET(channel, drive, 7892 nforce_udma[drvp->UDMA_mode]) | 7893 NFORCE_UDMA_EN(channel, drive) | 7894 NFORCE_UDMA_ENM(channel, drive); 7895 7896 mode = drvp->PIO_mode; 7897 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 7898 (drvp->drive_flags & DRIVE_DMA) != 0) { 7899 /* Setup multiword DMA mode */ 7900 drvp->drive_flags &= ~DRIVE_UDMA; 7901 7902 /* mode = min(pio, dma + 2) */ 7903 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 7904 mode = drvp->PIO_mode; 7905 else 7906 mode = drvp->DMA_mode + 2; 7907 } else { 7908 mode = drvp->PIO_mode; 7909 goto pio; 7910 } 7911 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7912 7913 pio: 7914 /* Setup PIO mode */ 7915 if (mode <= 2) { 7916 drvp->DMA_mode = 0; 7917 drvp->PIO_mode = 0; 7918 mode = 0; 7919 } else { 7920 drvp->PIO_mode = mode; 7921 drvp->DMA_mode = mode - 2; 7922 } 7923 piodmatim |= NFORCE_PIODMATIM_SET(channel, drive, 7924 nforce_pio[mode]); 7925 } 7926 7927 if (idedma_ctl != 0) { 7928 /* Add software bits in status register */ 7929 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7930 IDEDMA_CTL(channel), idedma_ctl); 7931 } 7932 7933 WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, " 7934 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 7935 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 7936 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim); 7937 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim); 7938 7939 
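	/*
	 * XXX NFORCE_PIOTIM is read and reported in the debug output above,
	 * but it is never modified or written back here; only the PIO/DMA
	 * and UDMA timing registers are updated.
	 */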
pciide_print_modes(cp); 7940 } 7941 7942 int 7943 nforce_pci_intr(void *arg) 7944 { 7945 struct pciide_softc *sc = arg; 7946 struct pciide_channel *cp; 7947 struct channel_softc *wdc_cp; 7948 int i, rv, crv; 7949 u_int32_t dmastat; 7950 7951 rv = 0; 7952 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7953 cp = &sc->pciide_channels[i]; 7954 wdc_cp = &cp->wdc_channel; 7955 7956 /* Skip compat channel */ 7957 if (cp->compat) 7958 continue; 7959 7960 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7961 IDEDMA_CTL(i)); 7962 if ((dmastat & IDEDMA_CTL_INTR) == 0) 7963 continue; 7964 7965 crv = wdcintr(wdc_cp); 7966 if (crv == 0) 7967 printf("%s:%d: bogus intr\n", 7968 sc->sc_wdcdev.sc_dev.dv_xname, i); 7969 else 7970 rv = 1; 7971 } 7972 return (rv); 7973 } 7974 7975 void 7976 artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7977 { 7978 struct pciide_channel *cp; 7979 bus_size_t cmdsize, ctlsize; 7980 pcireg_t interface; 7981 int channel; 7982 7983 printf(": DMA"); 7984 #ifdef PCIIDE_I31244_DISABLEDMA 7985 if (sc->sc_rev == 0) { 7986 printf(" disabled due to rev. 0"); 7987 sc->sc_dma_ok = 0; 7988 } else 7989 #endif 7990 pciide_mapreg_dma(sc, pa); 7991 printf("\n"); 7992 7993 /* 7994 * XXX Configure LEDs to show activity. 7995 */ 7996 7997 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7998 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7999 sc->sc_wdcdev.PIO_cap = 4; 8000 if (sc->sc_dma_ok) { 8001 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8002 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8003 sc->sc_wdcdev.irqack = pciide_irqack; 8004 sc->sc_wdcdev.DMA_cap = 2; 8005 sc->sc_wdcdev.UDMA_cap = 6; 8006 } 8007 sc->sc_wdcdev.set_modes = sata_setup_channel; 8008 8009 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8010 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8011 8012 interface = PCI_INTERFACE(pa->pa_class); 8013 8014 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8015 cp = &sc->pciide_channels[channel]; 8016 if (pciide_chansetup(sc, channel, interface) == 0) 8017 continue; 8018 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8019 pciide_pci_intr); 8020 if (cp->hw_ok == 0) 8021 continue; 8022 pciide_map_compat_intr(pa, cp, channel, interface); 8023 sata_setup_channel(&cp->wdc_channel); 8024 } 8025 } 8026 8027 void 8028 ite_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8029 { 8030 struct pciide_channel *cp; 8031 int channel; 8032 pcireg_t interface; 8033 bus_size_t cmdsize, ctlsize; 8034 pcireg_t cfg, modectl; 8035 8036 /* 8037 * Fake interface since IT8212F is claimed to be a ``RAID'' device. 
8038 */ 8039 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8040 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8041 8042 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8043 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8044 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8045 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8046 modectl & IT_MODE_MASK), DEBUG_PROBE); 8047 8048 printf(": DMA"); 8049 pciide_mapreg_dma(sc, pa); 8050 8051 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8052 WDC_CAPABILITY_MODE; 8053 if (sc->sc_dma_ok) { 8054 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8055 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8056 sc->sc_wdcdev.irqack = pciide_irqack; 8057 } 8058 sc->sc_wdcdev.PIO_cap = 4; 8059 sc->sc_wdcdev.DMA_cap = 2; 8060 sc->sc_wdcdev.UDMA_cap = 6; 8061 8062 sc->sc_wdcdev.set_modes = ite_setup_channel; 8063 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8064 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8065 8066 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8067 8068 /* Disable RAID */ 8069 modectl &= ~IT_MODE_RAID1; 8070 /* Disable CPU firmware mode */ 8071 modectl &= ~IT_MODE_CPU; 8072 8073 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8074 8075 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8076 cp = &sc->pciide_channels[channel]; 8077 8078 if (pciide_chansetup(sc, channel, interface) == 0) 8079 continue; 8080 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8081 pciide_pci_intr); 8082 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8083 } 8084 8085 /* Re-read configuration registers after channels setup */ 8086 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8087 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8088 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8089 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8090 modectl & IT_MODE_MASK), DEBUG_PROBE); 8091 } 8092 8093 void 8094 ite_setup_channel(struct channel_softc *chp) 8095 { 8096 struct ata_drive_datas *drvp; 8097 int drive, mode; 8098 u_int32_t idedma_ctl; 8099 struct pciide_channel *cp = (struct pciide_channel *)chp; 8100 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8101 int channel = chp->channel; 8102 pcireg_t cfg, modectl; 8103 pcireg_t tim; 8104 8105 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8106 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8107 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_TIM(channel)); 8108 WDCDEBUG_PRINT(("%s:%d: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8109 channel, tim), DEBUG_PROBE); 8110 8111 /* Setup DMA if needed */ 8112 pciide_channel_dma_setup(cp); 8113 8114 /* Clear all bits for this channel */ 8115 idedma_ctl = 0; 8116 8117 /* Per channel settings */ 8118 for (drive = 0; drive < 2; drive++) { 8119 drvp = &chp->ch_drive[drive]; 8120 8121 /* If no drive, skip */ 8122 if ((drvp->drive_flags & DRIVE) == 0) 8123 continue; 8124 8125 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8126 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8127 /* Setup UltraDMA mode */ 8128 drvp->drive_flags &= ~DRIVE_DMA; 8129 modectl &= ~IT_MODE_DMA(channel, drive); 8130 8131 #if 0 8132 /* Check cable, works only in CPU firmware mode */ 8133 if (drvp->UDMA_mode > 2 && 8134 (cfg & IT_CFG_CABLE(channel, drive)) == 0) { 8135 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 8136 "80-wire cable not detected\n", 8137 drvp->drive_name, 8138 sc->sc_wdcdev.sc_dev.dv_xname, 8139 channel, drive), DEBUG_PROBE); 8140 drvp->UDMA_mode = 2; 8141 } 8142 #endif 8143 
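			/*
			 * UDMA modes 5 and up apparently need the per-drive
			 * IT_TIM_UDMA5 timing bit; it is cleared again when
			 * a slower UDMA mode is selected.
			 */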
8144 if (drvp->UDMA_mode >= 5) 8145 tim |= IT_TIM_UDMA5(drive); 8146 else 8147 tim &= ~IT_TIM_UDMA5(drive); 8148 8149 mode = drvp->PIO_mode; 8150 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8151 (drvp->drive_flags & DRIVE_DMA) != 0) { 8152 /* Setup multiword DMA mode */ 8153 drvp->drive_flags &= ~DRIVE_UDMA; 8154 modectl |= IT_MODE_DMA(channel, drive); 8155 8156 /* mode = min(pio, dma + 2) */ 8157 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8158 mode = drvp->PIO_mode; 8159 else 8160 mode = drvp->DMA_mode + 2; 8161 } else { 8162 goto pio; 8163 } 8164 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8165 8166 pio: 8167 /* Setup PIO mode */ 8168 if (mode <= 2) { 8169 drvp->DMA_mode = 0; 8170 drvp->PIO_mode = 0; 8171 mode = 0; 8172 } else { 8173 drvp->PIO_mode = mode; 8174 drvp->DMA_mode = mode - 2; 8175 } 8176 8177 /* Enable IORDY if PIO mode >= 3 */ 8178 if (drvp->PIO_mode >= 3) 8179 cfg |= IT_CFG_IORDY(channel); 8180 } 8181 8182 WDCDEBUG_PRINT(("%s: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8183 tim), DEBUG_PROBE); 8184 8185 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_CFG, cfg); 8186 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8187 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_TIM(channel), tim); 8188 8189 if (idedma_ctl != 0) { 8190 /* Add software bits in status register */ 8191 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8192 IDEDMA_CTL(channel), idedma_ctl); 8193 } 8194 8195 pciide_print_modes(cp); 8196 } 8197 8198 void 8199 ixp_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8200 { 8201 struct pciide_channel *cp; 8202 int channel; 8203 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8204 bus_size_t cmdsize, ctlsize; 8205 8206 printf(": DMA"); 8207 pciide_mapreg_dma(sc, pa); 8208 8209 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8210 WDC_CAPABILITY_MODE; 8211 if (sc->sc_dma_ok) { 8212 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8213 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8214 sc->sc_wdcdev.irqack = pciide_irqack; 8215 } 8216 sc->sc_wdcdev.PIO_cap = 4; 8217 sc->sc_wdcdev.DMA_cap = 2; 8218 sc->sc_wdcdev.UDMA_cap = 6; 8219 8220 sc->sc_wdcdev.set_modes = ixp_setup_channel; 8221 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8222 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8223 8224 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8225 8226 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8227 cp = &sc->pciide_channels[channel]; 8228 if (pciide_chansetup(sc, channel, interface) == 0) 8229 continue; 8230 pciide_map_compat_intr(pa, cp, channel, interface); 8231 if (cp->hw_ok == 0) 8232 continue; 8233 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8234 pciide_pci_intr); 8235 if (cp->hw_ok == 0) { 8236 pciide_unmap_compat_intr(pa, cp, channel, interface); 8237 continue; 8238 } 8239 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8240 } 8241 } 8242 8243 void 8244 ixp_setup_channel(struct channel_softc *chp) 8245 { 8246 struct ata_drive_datas *drvp; 8247 int drive, mode; 8248 u_int32_t idedma_ctl; 8249 struct pciide_channel *cp = (struct pciide_channel*)chp; 8250 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8251 int channel = chp->channel; 8252 pcireg_t udma, mdma_timing, pio, pio_timing; 8253 8254 pio_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING); 8255 pio = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL); 8256 mdma_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING); 8257 udma = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL); 8258 
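	/*
	 * The four timing/control words read above are adjusted per drive in
	 * the loop below and written back in one batch once both drives have
	 * been handled.
	 */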
8259 /* Setup DMA if needed */ 8260 pciide_channel_dma_setup(cp); 8261 8262 idedma_ctl = 0; 8263 8264 /* Per channel settings */ 8265 for (drive = 0; drive < 2; drive++) { 8266 drvp = &chp->ch_drive[drive]; 8267 8268 /* If no drive, skip */ 8269 if ((drvp->drive_flags & DRIVE) == 0) 8270 continue; 8271 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8272 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8273 /* Setup UltraDMA mode */ 8274 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8275 IXP_UDMA_ENABLE(udma, chp->channel, drive); 8276 IXP_SET_MODE(udma, chp->channel, drive, 8277 drvp->UDMA_mode); 8278 mode = drvp->PIO_mode; 8279 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8280 (drvp->drive_flags & DRIVE_DMA) != 0) { 8281 /* Setup multiword DMA mode */ 8282 drvp->drive_flags &= ~DRIVE_UDMA; 8283 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8284 IXP_UDMA_DISABLE(udma, chp->channel, drive); 8285 IXP_SET_TIMING(mdma_timing, chp->channel, drive, 8286 ixp_mdma_timings[drvp->DMA_mode]); 8287 8288 /* mode = min(pio, dma + 2) */ 8289 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8290 mode = drvp->PIO_mode; 8291 else 8292 mode = drvp->DMA_mode + 2; 8293 } else { 8294 mode = drvp->PIO_mode; 8295 } 8296 8297 /* Setup PIO mode */ 8298 drvp->PIO_mode = mode; 8299 if (mode < 2) 8300 drvp->DMA_mode = 0; 8301 else 8302 drvp->DMA_mode = mode - 2; 8303 /* 8304 * Set PIO mode and timings 8305 * Linux driver avoids PIO mode 1, let's do it too. 8306 */ 8307 if (drvp->PIO_mode == 1) 8308 drvp->PIO_mode = 0; 8309 8310 IXP_SET_MODE(pio, chp->channel, drive, drvp->PIO_mode); 8311 IXP_SET_TIMING(pio_timing, chp->channel, drive, 8312 ixp_pio_timings[drvp->PIO_mode]); 8313 } 8314 8315 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL, udma); 8316 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING, mdma_timing); 8317 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL, pio); 8318 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING, pio_timing); 8319 8320 if (idedma_ctl != 0) { 8321 /* Add software bits in status register */ 8322 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8323 IDEDMA_CTL(channel), idedma_ctl); 8324 } 8325 8326 pciide_print_modes(cp); 8327 } 8328 8329 void 8330 jmicron_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8331 { 8332 struct pciide_channel *cp; 8333 int channel; 8334 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8335 bus_size_t cmdsize, ctlsize; 8336 u_int32_t conf; 8337 8338 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8339 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8340 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8341 8342 printf(": DMA"); 8343 pciide_mapreg_dma(sc, pa); 8344 8345 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8346 WDC_CAPABILITY_MODE; 8347 if (sc->sc_dma_ok) { 8348 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8349 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8350 sc->sc_wdcdev.irqack = pciide_irqack; 8351 } 8352 sc->sc_wdcdev.PIO_cap = 4; 8353 sc->sc_wdcdev.DMA_cap = 2; 8354 sc->sc_wdcdev.UDMA_cap = 6; 8355 sc->sc_wdcdev.set_modes = jmicron_setup_channel; 8356 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8357 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8358 8359 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8360 8361 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8362 cp = &sc->pciide_channels[channel]; 8363 8364 if (pciide_chansetup(sc, channel, interface) == 0) 8365 continue; 8366 8367 #if 0 8368 if ((conf & JMICRON_CHAN_EN(channel)) == 0) { 8369 
printf("%s: %s ignored (disabled)\n", 8370 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8371 continue; 8372 } 8373 #endif 8374 8375 pciide_map_compat_intr(pa, cp, channel, interface); 8376 if (cp->hw_ok == 0) 8377 continue; 8378 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8379 pciide_pci_intr); 8380 if (cp->hw_ok == 0) { 8381 pciide_unmap_compat_intr(pa, cp, channel, interface); 8382 continue; 8383 } 8384 8385 if (pciide_chan_candisable(cp)) { 8386 conf &= ~JMICRON_CHAN_EN(channel); 8387 pciide_unmap_compat_intr(pa, cp, channel, interface); 8388 continue; 8389 } 8390 8391 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8392 } 8393 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8394 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8395 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 8396 } 8397 8398 void 8399 jmicron_setup_channel(struct channel_softc *chp) 8400 { 8401 struct ata_drive_datas *drvp; 8402 int drive, mode; 8403 u_int32_t idedma_ctl; 8404 struct pciide_channel *cp = (struct pciide_channel *)chp; 8405 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8406 int channel = chp->channel; 8407 u_int32_t conf; 8408 8409 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8410 8411 /* Setup DMA if needed */ 8412 pciide_channel_dma_setup(cp); 8413 8414 /* Clear all bits for this channel */ 8415 idedma_ctl = 0; 8416 8417 /* Per channel settings */ 8418 for (drive = 0; drive < 2; drive++) { 8419 drvp = &chp->ch_drive[drive]; 8420 8421 /* If no drive, skip */ 8422 if ((drvp->drive_flags & DRIVE) == 0) 8423 continue; 8424 8425 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8426 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8427 /* Setup UltraDMA mode */ 8428 drvp->drive_flags &= ~DRIVE_DMA; 8429 8430 /* see if cable is up to scratch */ 8431 if ((conf & JMICRON_CONF_40PIN) && 8432 (drvp->UDMA_mode > 2)) 8433 drvp->UDMA_mode = 2; 8434 8435 mode = drvp->PIO_mode; 8436 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8437 (drvp->drive_flags & DRIVE_DMA) != 0) { 8438 /* Setup multiword DMA mode */ 8439 drvp->drive_flags &= ~DRIVE_UDMA; 8440 8441 /* mode = min(pio, dma + 2) */ 8442 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8443 mode = drvp->PIO_mode; 8444 else 8445 mode = drvp->DMA_mode + 2; 8446 } else { 8447 mode = drvp->PIO_mode; 8448 goto pio; 8449 } 8450 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8451 8452 pio: 8453 /* Setup PIO mode */ 8454 if (mode <= 2) { 8455 drvp->DMA_mode = 0; 8456 drvp->PIO_mode = 0; 8457 } else { 8458 drvp->PIO_mode = mode; 8459 drvp->DMA_mode = mode - 2; 8460 } 8461 } 8462 8463 if (idedma_ctl != 0) { 8464 /* Add software bits in status register */ 8465 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8466 IDEDMA_CTL(channel), idedma_ctl); 8467 } 8468 8469 pciide_print_modes(cp); 8470 } 8471