/*	$OpenBSD: pciide.c,v 1.305 2009/11/01 01:50:15 dlg Exp $	*/
/*	$NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christopher G. Demetriou
 *	for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10

#ifdef WDCDEBUG
#ifndef WDCDEBUG_PCIIDE_MASK
#define WDCDEBUG_PCIIDE_MASK 0x00
#endif
int wdcdebug_pciide_mask = WDCDEBUG_PCIIDE_MASK;
#define WDCDEBUG_PRINT(args, level) do {		\
	if ((wdcdebug_pciide_mask & (level)) != 0)	\
		printf args;				\
} while (0)
#else
#define WDCDEBUG_PRINT(args, level)
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <machine/bus.h>
#include <machine/endian.h>

#include <dev/ata/atavar.h>
#include <dev/ata/satareg.h>
#include <dev/ic/wdcreg.h>
#include <dev/ic/wdcvar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_sii3112_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/pciide_acard_reg.h>
#include <dev/pci/pciide_natsemi_reg.h>
#include <dev/pci/pciide_nforce_reg.h>
#include <dev/pci/pciide_i31244_reg.h>
#include <dev/pci/pciide_ite_reg.h>
#include <dev/pci/pciide_ixp_reg.h>
#include <dev/pci/pciide_svwsata_reg.h>
#include <dev/pci/pciide_jmicron_reg.h>
#include <dev/pci/cy82c693var.h>

/* functions for reading/writing 8-bit PCI registers */

u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t, int);
void pciide_pci_write(pci_chipset_tag_t, pcitag_t, int, u_int8_t);

u_int8_t
pciide_pci_read(pci_chipset_tag_t pc, pcitag_t pa, int reg)
{
	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

void
pciide_pci_write(pci_chipset_tag_t pc, pcitag_t pa, int reg, u_int8_t val)
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}
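/*
 * The two helpers above give byte-granular access to PCI configuration
 * space on top of the 32-bit pci_conf_read()/pci_conf_write() interface:
 * the read shifts the requested byte out of its aligned dword, and the
 * write does a read-modify-write of that dword so the three neighbouring
 * bytes are preserved.
 */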
void default_chip_map(struct pciide_softc *, struct pci_attach_args *);

void sata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sata_setup_channel(struct channel_softc *);

void piix_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piixsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piix_setup_channel(struct channel_softc *);
void piix3_4_setup_channel(struct channel_softc *);
void piix_timing_debug(struct pciide_softc *);

u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t);
u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas *);
u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t);

void amd756_chip_map(struct pciide_softc *, struct pci_attach_args *);
void amd756_setup_channel(struct channel_softc *);

void apollo_chip_map(struct pciide_softc *, struct pci_attach_args *);
void apollo_setup_channel(struct channel_softc *);

void cmd_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd0643_9_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd0643_9_setup_channel(struct channel_softc *);
void cmd680_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cmd680_setup_channel(struct channel_softc *);
void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int);
void cmd_channel_map(struct pci_attach_args *, struct pciide_softc *, int);
int cmd_pci_intr(void *);
void cmd646_9_irqack(struct channel_softc *);

void sii_fixup_cacheline(struct pciide_softc *, struct pci_attach_args *);
void sii3112_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sii3112_setup_channel(struct channel_softc *);
void sii3112_drv_probe(struct channel_softc *);
void sii3114_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sii3114_mapreg_dma(struct pciide_softc *, struct pci_attach_args *);
int sii3114_chansetup(struct pciide_softc *, int);
void sii3114_mapchan(struct pciide_channel *);
u_int8_t sii3114_dmacmd_read(struct pciide_softc *, int);
void sii3114_dmacmd_write(struct pciide_softc *, int, u_int8_t);
u_int8_t sii3114_dmactl_read(struct pciide_softc *, int);
void sii3114_dmactl_write(struct pciide_softc *, int, u_int8_t);
void sii3114_dmatbl_write(struct pciide_softc *, int, u_int32_t);

void cy693_chip_map(struct pciide_softc *, struct pci_attach_args *);
void cy693_setup_channel(struct channel_softc *);

void sis_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sis_setup_channel(struct channel_softc *);
void sis96x_setup_channel(struct channel_softc *);
int sis_hostbr_match(struct pci_attach_args *);
int sis_south_match(struct pci_attach_args *);

void natsemi_chip_map(struct pciide_softc *, struct pci_attach_args *);
void natsemi_setup_channel(struct channel_softc *);
int natsemi_pci_intr(void *);
void natsemi_irqack(struct channel_softc *);
void ns_scx200_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ns_scx200_setup_channel(struct channel_softc *);

void acer_chip_map(struct pciide_softc *, struct pci_attach_args *);
void acer_setup_channel(struct channel_softc *);
int acer_pci_intr(void *);

void pdc202xx_chip_map(struct pciide_softc *, struct pci_attach_args *);
void pdc202xx_setup_channel(struct channel_softc *);
void pdc20268_setup_channel(struct channel_softc *);
int pdc202xx_pci_intr(void *);
int pdc20265_pci_intr(void *);
void pdc20262_dma_start(void *, int, int);
int pdc20262_dma_finish(void *, int, int, int);
u_int8_t pdc268_config_read(struct channel_softc *, int);

void pdcsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void pdc203xx_setup_channel(struct channel_softc *);
int pdc203xx_pci_intr(void *);
void pdc203xx_irqack(struct channel_softc *);
void pdc203xx_dma_start(void *, int, int);
int pdc203xx_dma_finish(void *, int, int, int);
int pdc205xx_pci_intr(void *);
void pdc205xx_do_reset(struct channel_softc *);
void pdc205xx_drv_probe(struct channel_softc *);

void opti_chip_map(struct pciide_softc *, struct pci_attach_args *);
void opti_setup_channel(struct channel_softc *);

void hpt_chip_map(struct pciide_softc *, struct pci_attach_args *);
void hpt_setup_channel(struct channel_softc *);
int hpt_pci_intr(void *);

void acard_chip_map(struct pciide_softc *, struct pci_attach_args *);
void acard_setup_channel(struct channel_softc *);

void serverworks_chip_map(struct pciide_softc *, struct pci_attach_args *);
void serverworks_setup_channel(struct channel_softc *);
int serverworks_pci_intr(void *);

void svwsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void svwsata_mapreg_dma(struct pciide_softc *, struct pci_attach_args *);
void svwsata_mapchan(struct pciide_channel *);
u_int8_t svwsata_dmacmd_read(struct pciide_softc *, int);
void svwsata_dmacmd_write(struct pciide_softc *, int, u_int8_t);
u_int8_t svwsata_dmactl_read(struct pciide_softc *, int);
void svwsata_dmactl_write(struct pciide_softc *, int, u_int8_t);
void svwsata_dmatbl_write(struct pciide_softc *, int, u_int32_t);
void svwsata_drv_probe(struct channel_softc *);

void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *);
void nforce_setup_channel(struct channel_softc *);
int nforce_pci_intr(void *);

void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *);

void ite_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ite_setup_channel(struct channel_softc *);

void ixp_chip_map(struct pciide_softc *, struct pci_attach_args *);
void ixp_setup_channel(struct channel_softc *);

void jmicron_chip_map(struct pciide_softc *, struct pci_attach_args *);
void jmicron_setup_channel(struct channel_softc *);

void phison_chip_map(struct pciide_softc *, struct pci_attach_args *);
void phison_setup_channel(struct channel_softc *);

void sch_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sch_setup_channel(struct channel_softc *);

struct pciide_product_desc {
	u_int32_t ide_product;
	u_short ide_flags;
	/* map and setup chip, probe drives */
	void (*chip_map)(struct pciide_softc *, struct pci_attach_args *);
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
#define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARS ignore upper word */

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,	/* Generic PCI IDE controller */
	0,
	default_chip_map
};

const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_31244, 0, artisea_chip_map },	/* Intel 31244 SATA */
	{ PCI_PRODUCT_INTEL_82092AA, 0, default_chip_map },	/* Intel 82092AA IDE */
	{ PCI_PRODUCT_INTEL_82371FB_IDE, 0, piix_chip_map },	/* Intel 82371FB IDE (PIIX) */
	{ PCI_PRODUCT_INTEL_82371FB_ISA, 0, piix_chip_map },	/* Intel 82371FB IDE (PIIX) */
	{ PCI_PRODUCT_INTEL_82372FB_IDE, 0, piix_chip_map },	/* Intel 82372FB IDE (PIIX4) */
	{ PCI_PRODUCT_INTEL_82371SB_IDE, 0, piix_chip_map },	/* Intel 82371SB IDE (PIIX3) */
	{ PCI_PRODUCT_INTEL_82371AB_IDE, 0, piix_chip_map },	/* Intel 82371AB IDE (PIIX4) */
	{ PCI_PRODUCT_INTEL_82371MX, 0, piix_chip_map },	/* Intel 82371MX IDE */
	{ PCI_PRODUCT_INTEL_82440MX_IDE, 0, piix_chip_map },	/* Intel 82440MX IDE */
	{ PCI_PRODUCT_INTEL_82451NX, 0, piix_chip_map },	/* Intel 82451NX (PIIX4) IDE */
	{ PCI_PRODUCT_INTEL_82801AA_IDE, 0, piix_chip_map },	/* Intel 82801AA IDE (ICH) */
	{ PCI_PRODUCT_INTEL_82801AB_IDE, 0, piix_chip_map },	/* Intel 82801AB IDE (ICH0) */
	{ PCI_PRODUCT_INTEL_82801BAM_IDE, 0, piix_chip_map },	/* Intel 82801BAM IDE (ICH2) */
	{ PCI_PRODUCT_INTEL_82801BA_IDE, 0, piix_chip_map },	/* Intel 82801BA IDE (ICH2) */
	{ PCI_PRODUCT_INTEL_82801CAM_IDE, 0, piix_chip_map },	/* Intel 82801CAM IDE (ICH3) */
	{ PCI_PRODUCT_INTEL_82801CA_IDE, 0, piix_chip_map },	/* Intel 82801CA IDE (ICH3) */
	{ PCI_PRODUCT_INTEL_82801DB_IDE, 0, piix_chip_map },	/* Intel 82801DB IDE (ICH4) */
	{ PCI_PRODUCT_INTEL_82801DBL_IDE, 0, piix_chip_map },	/* Intel 82801DBL IDE (ICH4-L) */
	{ PCI_PRODUCT_INTEL_82801DBM_IDE, 0, piix_chip_map },	/* Intel 82801DBM IDE (ICH4-M) */
	{ PCI_PRODUCT_INTEL_82801EB_IDE, 0, piix_chip_map },	/* Intel 82801EB/ER (ICH5/5R) IDE */
	{ PCI_PRODUCT_INTEL_82801EB_SATA, 0, piixsata_chip_map },	/* Intel 82801EB (ICH5) SATA */
	{ PCI_PRODUCT_INTEL_82801ER_SATA, 0, piixsata_chip_map },	/* Intel 82801ER (ICH5R) SATA */
	{ PCI_PRODUCT_INTEL_6300ESB_IDE, 0, piix_chip_map },	/* Intel 6300ESB IDE */
	{ PCI_PRODUCT_INTEL_6300ESB_SATA, 0, piixsata_chip_map },	/* Intel 6300ESB SATA */
	{ PCI_PRODUCT_INTEL_6300ESB_SATA2, 0, piixsata_chip_map },	/* Intel 6300ESB SATA */
	{ PCI_PRODUCT_INTEL_6321ESB_IDE, 0, piix_chip_map },	/* Intel 6321ESB IDE */
	{ PCI_PRODUCT_INTEL_82801FB_IDE, 0, piix_chip_map },	/* Intel 82801FB (ICH6) IDE */
	{ PCI_PRODUCT_INTEL_82801FBM_SATA, 0, piixsata_chip_map },	/* Intel 82801FBM (ICH6M) SATA */
	{ PCI_PRODUCT_INTEL_82801FB_SATA, 0, piixsata_chip_map },	/* Intel 82801FB (ICH6) SATA */
	{ PCI_PRODUCT_INTEL_82801FR_SATA, 0, piixsata_chip_map },	/* Intel 82801FR (ICH6R) SATA */
	{ PCI_PRODUCT_INTEL_82801GB_IDE, 0, piix_chip_map },	/* Intel 82801GB (ICH7) IDE */
	{ PCI_PRODUCT_INTEL_82801GB_SATA, 0, piixsata_chip_map },	/* Intel 82801GB (ICH7) SATA */
	{ PCI_PRODUCT_INTEL_82801GR_AHCI, 0, piixsata_chip_map },	/* Intel 82801GR (ICH7R) AHCI */
	{ PCI_PRODUCT_INTEL_82801GR_RAID, 0, piixsata_chip_map },	/* Intel 82801GR (ICH7R) RAID */
	{ PCI_PRODUCT_INTEL_82801GBM_SATA, 0, piixsata_chip_map },	/* Intel 82801GBM (ICH7M) SATA */
	{ PCI_PRODUCT_INTEL_82801GBM_AHCI, 0, piixsata_chip_map },	/* Intel 82801GBM (ICH7M) AHCI */
	{ PCI_PRODUCT_INTEL_82801GHM_RAID, 0, piixsata_chip_map },	/* Intel 82801GHM (ICH7M DH) RAID */
	{ PCI_PRODUCT_INTEL_82801H_SATA_1, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) SATA */
	{ PCI_PRODUCT_INTEL_82801H_AHCI_6P, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) AHCI */
	{ PCI_PRODUCT_INTEL_82801H_RAID, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) RAID */
	{ PCI_PRODUCT_INTEL_82801H_AHCI_4P, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) AHCI */
	{ PCI_PRODUCT_INTEL_82801H_SATA_2, 0, piixsata_chip_map },	/* Intel 82801H (ICH8) SATA */
	{ PCI_PRODUCT_INTEL_82801HBM_SATA, 0, piixsata_chip_map },	/* Intel 82801HBM (ICH8M) SATA */
	{ PCI_PRODUCT_INTEL_82801HBM_AHCI, 0, piixsata_chip_map },	/* Intel 82801HBM (ICH8M) AHCI */
	{ PCI_PRODUCT_INTEL_82801HBM_RAID, 0, piixsata_chip_map },	/* Intel 82801HBM (ICH8M) RAID */
	{ PCI_PRODUCT_INTEL_82801HBM_IDE, 0, piix_chip_map },	/* Intel 82801HBM (ICH8M) IDE */
	{ PCI_PRODUCT_INTEL_82801I_SATA_1, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_2, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_3, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_4, 0, piixsata_chip_map },	/* Intel 82801I (ICH9) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_5, 0, piixsata_chip_map },	/* Intel 82801I (ICH9M) SATA */
	{ PCI_PRODUCT_INTEL_82801I_SATA_6, 0, piixsata_chip_map },	/* Intel 82801I (ICH9M) SATA */
	{ PCI_PRODUCT_INTEL_82801JD_SATA_1, 0, piixsata_chip_map },	/* Intel 82801JD (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_82801JD_SATA_2, 0, piixsata_chip_map },	/* Intel 82801JD (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_82801JI_SATA_1, 0, piixsata_chip_map },	/* Intel 82801JI (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_82801JI_SATA_2, 0, piixsata_chip_map },	/* Intel 82801JI (ICH10) SATA */
	{ PCI_PRODUCT_INTEL_6321ESB_SATA, 0, piixsata_chip_map },	/* Intel 6321ESB SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_1, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_2, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_3, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_4, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_5, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_3400_SATA_6, 0, piixsata_chip_map },	/* Intel 3400 SATA */
	{ PCI_PRODUCT_INTEL_SCH_IDE, 0, sch_chip_map }	/* Intel SCH IDE */
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE, 0, amd756_chip_map },	/* AMD 756 */
	{ PCI_PRODUCT_AMD_766_IDE, 0, amd756_chip_map },	/* AMD 766 */
	{ PCI_PRODUCT_AMD_PBC768_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_8111_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_CS5536_IDE, 0, amd756_chip_map },
	{ PCI_PRODUCT_AMD_HUDSON2_IDE, 0, ixp_chip_map }
};

#ifdef notyet
const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621, 0, opti_chip_map },
	{ PCI_PRODUCT_OPTI_82C568, 0, opti_chip_map },
	{ PCI_PRODUCT_OPTI_82D568, 0, opti_chip_map }
};
#endif
const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640, 0, cmd_chip_map },	/* CMD Technology PCI0640 */
	{ PCI_PRODUCT_CMDTECH_643, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0643 */
	{ PCI_PRODUCT_CMDTECH_646, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0646 */
	{ PCI_PRODUCT_CMDTECH_648, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0648 */
	{ PCI_PRODUCT_CMDTECH_649, 0, cmd0643_9_chip_map },	/* CMD Technology PCI0649 */
	{ PCI_PRODUCT_CMDTECH_680, IDE_PCI_CLASS_OVERRIDE, cmd680_chip_map },	/* CMD Technology PCI0680 */
	{ PCI_PRODUCT_CMDTECH_3112, 0, sii3112_chip_map },	/* SiI3112 SATA */
	{ PCI_PRODUCT_CMDTECH_3512, 0, sii3112_chip_map },	/* SiI3512 SATA */
	{ PCI_PRODUCT_CMDTECH_AAR_1210SA, 0, sii3112_chip_map },	/* Adaptec AAR-1210SA */
	{ PCI_PRODUCT_CMDTECH_3114, 0, sii3114_chip_map }	/* SiI3114 SATA */
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C416, 0, apollo_chip_map },	/* VIA VT82C416 IDE */
	{ PCI_PRODUCT_VIATECH_VT82C571, 0, apollo_chip_map },	/* VIA VT82C571 IDE */
	{ PCI_PRODUCT_VIATECH_VT6410, IDE_PCI_CLASS_OVERRIDE, apollo_chip_map },	/* VIA VT6410 IDE */
	{ PCI_PRODUCT_VIATECH_CX700_IDE, 0, apollo_chip_map },	/* VIA CX700 IDE */
	{ PCI_PRODUCT_VIATECH_VX700_IDE, 0, apollo_chip_map },	/* VIA VX700 IDE */
	{ PCI_PRODUCT_VIATECH_VX855_IDE, 0, apollo_chip_map },	/* VIA VX855 IDE */
	{ PCI_PRODUCT_VIATECH_VT6420_SATA, 0, sata_chip_map },	/* VIA VT6420 SATA */
	{ PCI_PRODUCT_VIATECH_VT6421_SATA, 0, sata_chip_map },	/* VIA VT6421 SATA */
	{ PCI_PRODUCT_VIATECH_VT8237A_SATA, 0, sata_chip_map },	/* VIA VT8237A SATA */
	{ PCI_PRODUCT_VIATECH_VT8237A_SATA_2, 0, sata_chip_map },	/* VIA VT8237A SATA */
	{ PCI_PRODUCT_VIATECH_VT8237S_SATA, 0, sata_chip_map },	/* VIA VT8237S SATA */
	{ PCI_PRODUCT_VIATECH_VT8251_SATA, 0, sata_chip_map }	/* VIA VT8251 SATA */
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693, IDE_16BIT_IOSPACE, cy693_chip_map }	/* Contaq CY82C693 IDE */
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5513, 0, sis_chip_map },	/* SIS 5513 EIDE */
	{ PCI_PRODUCT_SIS_180, 0, sata_chip_map },	/* SIS 180 SATA */
	{ PCI_PRODUCT_SIS_181, 0, sata_chip_map },	/* SIS 181 SATA */
	{ PCI_PRODUCT_SIS_182, 0, sata_chip_map }	/* SIS 182 SATA */
};

/*
 * The National/AMD CS5535 requires MSRs to set DMA/PIO modes so it
 * has been banished to the MD i386 pciide_machdep
 */
const struct pciide_product_desc pciide_natsemi_products[] = {
#ifdef __i386__
	{ PCI_PRODUCT_NS_CS5535_IDE, 0, gcsc_chip_map },	/* National/AMD CS5535 IDE */
#endif
	{ PCI_PRODUCT_NS_PC87415, 0, natsemi_chip_map },	/* National Semi PC87415 IDE */
	{ PCI_PRODUCT_NS_SCx200_IDE, 0, ns_scx200_chip_map }	/* National Semi SCx200 IDE */
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229, 0, acer_chip_map }	/* Acer Labs M5229 UDMA IDE */
};
const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT36x/37x IDE */
	{ PCI_PRODUCT_TRIONES_HPT372A, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT372A IDE */
	{ PCI_PRODUCT_TRIONES_HPT302, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT302 IDE */
	{ PCI_PRODUCT_TRIONES_HPT371, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map },	/* Highpoint HPT371 IDE */
	{ PCI_PRODUCT_TRIONES_HPT374, IDE_PCI_CLASS_OVERRIDE, hpt_chip_map }	/* Highpoint HPT374 IDE */
};

const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_PDC20246, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20262, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20265, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20267, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20268, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20268R, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20269, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20271, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20275, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20276, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20277, IDE_PCI_CLASS_OVERRIDE, pdc202xx_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20318, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20319, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20371, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20375, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20376, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20377, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20378, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20379, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40518, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40519, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40718, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40719, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC40779, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20571, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20575, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20579, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20771, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map },
	{ PCI_PRODUCT_PROMISE_PDC20775, IDE_PCI_CLASS_OVERRIDE, pdcsata_chip_map }
};

const struct pciide_product_desc pciide_acard_products[] = {
	{ PCI_PRODUCT_ACARD_ATP850U, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP850U Ultra33 Controller */
	{ PCI_PRODUCT_ACARD_ATP860, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP860 Ultra66 Controller */
	{ PCI_PRODUCT_ACARD_ATP860A, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP860-A Ultra66 Controller */
	{ PCI_PRODUCT_ACARD_ATP865A, IDE_PCI_CLASS_OVERRIDE, acard_chip_map },	/* Acard ATP865-A Ultra133 Controller */
	{ PCI_PRODUCT_ACARD_ATP865R, IDE_PCI_CLASS_OVERRIDE, acard_chip_map }	/* Acard ATP865-R Ultra133 Controller */
};

const struct pciide_product_desc pciide_serverworks_products[] = {
	{ PCI_PRODUCT_RCC_OSB4_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB5_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB6_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_CSB6_RAID_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_IDE, 0, serverworks_chip_map },
	{ PCI_PRODUCT_RCC_K2_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_FRODO4_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_FRODO8_SATA, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_SATA_1, 0, svwsata_chip_map },
	{ PCI_PRODUCT_RCC_HT_1000_SATA_2, 0, svwsata_chip_map }
};

const struct pciide_product_desc pciide_nvidia_products[] = {
	{ PCI_PRODUCT_NVIDIA_NFORCE_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_400_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_ATA133, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP73_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP77_IDE, 0, nforce_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE2_400_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_SATA1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_NFORCE4_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP04_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP51_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP55_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP61_SATA3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP65_SATA4, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP67_SATA4, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP77_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_1, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_2, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_3, 0, sata_chip_map },
	{ PCI_PRODUCT_NVIDIA_MCP79_SATA_4, 0, sata_chip_map }
};

const struct pciide_product_desc pciide_ite_products[] = {
	{ PCI_PRODUCT_ITEXPRESS_IT8211F, IDE_PCI_CLASS_OVERRIDE, ite_chip_map },
	{ PCI_PRODUCT_ITEXPRESS_IT8212F, IDE_PCI_CLASS_OVERRIDE, ite_chip_map }
};

const struct pciide_product_desc pciide_ati_products[] = {
	{ PCI_PRODUCT_ATI_SB200_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB300_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB400_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB600_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB700_IDE, 0, ixp_chip_map },
	{ PCI_PRODUCT_ATI_SB300_SATA, 0, sii3112_chip_map },
	{ PCI_PRODUCT_ATI_SB400_SATA_1, 0, sii3112_chip_map },
	{ PCI_PRODUCT_ATI_SB400_SATA_2, 0, sii3112_chip_map }
};

const struct pciide_product_desc pciide_jmicron_products[] = {
	{ PCI_PRODUCT_JMICRON_JMB361, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB363, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB365, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB366, 0, jmicron_chip_map },
	{ PCI_PRODUCT_JMICRON_JMB368, 0, jmicron_chip_map }
};

const struct pciide_product_desc pciide_phison_products[] = {
	{ PCI_PRODUCT_PHISON_PS5000, 0, phison_chip_map },
};

struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
	int ide_nproducts;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products,
	    sizeof(pciide_intel_products)/sizeof(pciide_intel_products[0]) },
	{ PCI_VENDOR_AMD, pciide_amd_products,
	    sizeof(pciide_amd_products)/sizeof(pciide_amd_products[0]) },
#ifdef notyet
	{ PCI_VENDOR_OPTI, pciide_opti_products,
	    sizeof(pciide_opti_products)/sizeof(pciide_opti_products[0]) },
#endif
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products,
	    sizeof(pciide_cmd_products)/sizeof(pciide_cmd_products[0]) },
	{ PCI_VENDOR_VIATECH, pciide_via_products,
	    sizeof(pciide_via_products)/sizeof(pciide_via_products[0]) },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products,
	    sizeof(pciide_cypress_products)/sizeof(pciide_cypress_products[0]) },
	{ PCI_VENDOR_SIS, pciide_sis_products,
	    sizeof(pciide_sis_products)/sizeof(pciide_sis_products[0]) },
	{ PCI_VENDOR_NS, pciide_natsemi_products,
	    sizeof(pciide_natsemi_products)/sizeof(pciide_natsemi_products[0]) },
	{ PCI_VENDOR_ALI, pciide_acer_products,
	    sizeof(pciide_acer_products)/sizeof(pciide_acer_products[0]) },
	{ PCI_VENDOR_TRIONES, pciide_triones_products,
	    sizeof(pciide_triones_products)/sizeof(pciide_triones_products[0]) },
	{ PCI_VENDOR_ACARD, pciide_acard_products,
	    sizeof(pciide_acard_products)/sizeof(pciide_acard_products[0]) },
	{ PCI_VENDOR_RCC, pciide_serverworks_products,
	    sizeof(pciide_serverworks_products)/sizeof(pciide_serverworks_products[0]) },
	{ PCI_VENDOR_PROMISE, pciide_promise_products,
	    sizeof(pciide_promise_products)/sizeof(pciide_promise_products[0]) },
	{ PCI_VENDOR_NVIDIA, pciide_nvidia_products,
	    sizeof(pciide_nvidia_products)/sizeof(pciide_nvidia_products[0]) },
	{ PCI_VENDOR_ITEXPRESS, pciide_ite_products,
	    sizeof(pciide_ite_products)/sizeof(pciide_ite_products[0]) },
	{ PCI_VENDOR_ATI, pciide_ati_products,
	    sizeof(pciide_ati_products)/sizeof(pciide_ati_products[0]) },
	{ PCI_VENDOR_JMICRON, pciide_jmicron_products,
	    sizeof(pciide_jmicron_products)/sizeof(pciide_jmicron_products[0]) },
	{ PCI_VENDOR_PHISON, pciide_phison_products,
	    sizeof(pciide_phison_products)/sizeof(pciide_phison_products[0]) }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01

int	pciide_match(struct device *, void *, void *);
void	pciide_attach(struct device *, struct device *, void *);
int	pciide_detach(struct device *, int);

struct cfattach pciide_pci_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach, pciide_detach,
};

struct cfattach pciide_jmb_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach, pciide_detach,
};

struct cfdriver pciide_cd = {
	NULL, "pciide", DV_DULL
};

const struct pciide_product_desc *pciide_lookup_product(u_int32_t);

const struct pciide_product_desc *
pciide_lookup_product(u_int32_t id)
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;
	int i;

	for (i = 0, vp = pciide_vendors;
	    i < sizeof(pciide_vendors)/sizeof(pciide_vendors[0]);
	    vp++, i++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if (i == sizeof(pciide_vendors)/sizeof(pciide_vendors[0]))
		return (NULL);

	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (i == vp->ide_nproducts)
		return (NULL);
	return (pp);
}
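/*
 * Match policy, summarized from the code below: the buggy PC Tech RZ1000 is
 * rejected outright, products flagged IDE_PCI_CLASS_OVERRIDE are accepted
 * regardless of their PCI class, any mass-storage device with the IDE
 * subclass is accepted generically, and the SATA/RAID/miscellaneous
 * subclasses are only accepted when they appear in the product tables above.
 */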
int
pciide_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Some IDE controllers have severe bugs when used in PCI mode.
	 * We punt and attach them to the ISA bus instead.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
		return (0);

	/*
	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
	 * controllers. Let's see if we can deal with it anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE))
		return (1);

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE) {
		switch (PCI_SUBCLASS(pa->pa_class)) {
		case PCI_SUBCLASS_MASS_STORAGE_IDE:
			return (1);

		/*
		 * We only match these if we know they have
		 * a match, as we may not support native interfaces
		 * on them.
		 */
		case PCI_SUBCLASS_MASS_STORAGE_SATA:
		case PCI_SUBCLASS_MASS_STORAGE_RAID:
		case PCI_SUBCLASS_MASS_STORAGE_MISC:
			if (pp)
				return (1);
			else
				return (0);
			break;
		}
	}

	return (0);
}

void
pciide_attach(struct device *parent, struct device *self, void *aux)
{
	struct pciide_softc *sc = (struct pciide_softc *)self;
	struct pci_attach_args *pa = aux;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL)
		sc->sc_pp = &default_product_desc;
	sc->sc_rev = PCI_REVISION(pa->pa_class);

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Set up DMA defaults; these might be adjusted by chip_map. */
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;

	sc->sc_dmacmd_read = pciide_dmacmd_read;
	sc->sc_dmacmd_write = pciide_dmacmd_write;
	sc->sc_dmactl_read = pciide_dmactl_read;
	sc->sc_dmactl_write = pciide_dmactl_write;
	sc->sc_dmatbl_write = pciide_dmatbl_write;

	WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc,
	    sc->sc_tag, pa->pa_class), DEBUG_PROBE);

	sc->sc_pp->chip_map(sc, pa);

	WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG)),
	    DEBUG_PROBE);
}

int
pciide_detach(struct device *self, int flags)
{
	struct pciide_softc *sc = (struct pciide_softc *)self;

	if (sc->chip_unmap == NULL)
		panic("unmap not yet implemented for this chipset");
	else
		sc->chip_unmap(sc, flags);

	return 0;
}
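/*
 * Map a channel that is wired to compatibility (legacy) mode.  The command
 * and control blocks live at the fixed ISA-compatible I/O addresses selected
 * by PCIIDE_COMPAT_CMD_BASE()/PCIIDE_COMPAT_CTL_BASE() for the given
 * compatchan, so they are mapped directly from pa->pa_iot instead of through
 * a PCI BAR.
 */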
int
pciide_mapregs_compat(struct pci_attach_args *pa, struct pciide_channel *cp,
    int compatchan, bus_size_t *cmdsizep, bus_size_t *ctlsizep)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	pcireg_t csr;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MASTER_ENABLE);

	wdc_cp->cmd_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;

	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}
	wdc_cp->cmd_iosz = *cmdsizep;
	wdc_cp->ctl_iosz = *ctlsizep;

	return (1);
}

int
pciide_unmapregs_compat(struct pciide_softc *sc, struct pciide_channel *cp)
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz);
	bus_space_unmap(wdc_cp->ctl_iot, wdc_cp->ctl_ioh, wdc_cp->ctl_iosz);

	if (sc->sc_pci_ih != NULL) {
		pciide_machdep_compat_intr_disestablish(sc->sc_pc,
		    sc->sc_pci_ih);
		sc->sc_pci_ih = NULL;
	}

	return (0);
}

int
pciide_mapregs_native(struct pci_attach_args *pa, struct pciide_channel *cp,
    bus_size_t *cmdsizep, bus_size_t *ctlsizep, int (*pci_intr)(void *))
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;
	pcireg_t maptype;

	cp->compat = 0;

	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return (0);
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc,
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return (0);
		}
	}
	cp->ih = sc->sc_pci_ih;
	sc->sc_pc = pa->pa_pc;

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CMD_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CTL_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O": "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return (0);
	}
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register, the control register is at offset 2. Pass the generic
	 * code a handle for only one byte at the right offset.
	 */
1526 */ 1527 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1, 1528 &wdc_cp->ctl_ioh) != 0) { 1529 printf("%s: unable to subregion %s ctl regs\n", 1530 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1531 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 1532 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep); 1533 return (0); 1534 } 1535 wdc_cp->cmd_iosz = *cmdsizep; 1536 wdc_cp->ctl_iosz = *ctlsizep; 1537 1538 return (1); 1539 } 1540 1541 int 1542 pciide_unmapregs_native(struct pciide_softc *sc, struct pciide_channel *cp) 1543 { 1544 struct channel_softc *wdc_cp = &cp->wdc_channel; 1545 1546 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz); 1547 1548 /* Unmap the whole control space, not just the sub-region */ 1549 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, wdc_cp->ctl_iosz); 1550 1551 if (sc->sc_pci_ih != NULL) { 1552 pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih); 1553 sc->sc_pci_ih = NULL; 1554 } 1555 1556 return (0); 1557 } 1558 1559 void 1560 pciide_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 1561 { 1562 pcireg_t maptype; 1563 bus_addr_t addr; 1564 1565 /* 1566 * Map DMA registers 1567 * 1568 * Note that sc_dma_ok is the right variable to test to see if 1569 * DMA can be done. If the interface doesn't support DMA, 1570 * sc_dma_ok will never be non-zero. If the DMA regs couldn't 1571 * be mapped, it'll be zero. I.e., sc_dma_ok will only be 1572 * non-zero if the interface supports DMA and the registers 1573 * could be mapped. 1574 * 1575 * XXX Note that despite the fact that the Bus Master IDE specs 1576 * XXX say that "The bus master IDE function uses 16 bytes of IO 1577 * XXX space", some controllers (at least the United 1578 * XXX Microelectronics UM8886BF) place it in memory space. 
1579 */ 1580 1581 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 1582 PCIIDE_REG_BUS_MASTER_DMA); 1583 1584 switch (maptype) { 1585 case PCI_MAPREG_TYPE_IO: 1586 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 1587 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 1588 &addr, NULL, NULL) == 0); 1589 if (sc->sc_dma_ok == 0) { 1590 printf(", unused (couldn't query registers)"); 1591 break; 1592 } 1593 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 1594 && addr >= 0x10000) { 1595 sc->sc_dma_ok = 0; 1596 printf(", unused (registers at unsafe address %#lx)", addr); 1597 break; 1598 } 1599 /* FALLTHROUGH */ 1600 1601 case PCI_MAPREG_MEM_TYPE_32BIT: 1602 sc->sc_dma_ok = (pci_mapreg_map(pa, 1603 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 1604 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, &sc->sc_dma_iosz, 1605 0) == 0); 1606 sc->sc_dmat = pa->pa_dmat; 1607 if (sc->sc_dma_ok == 0) { 1608 printf(", unused (couldn't map registers)"); 1609 } else { 1610 sc->sc_wdcdev.dma_arg = sc; 1611 sc->sc_wdcdev.dma_init = pciide_dma_init; 1612 sc->sc_wdcdev.dma_start = pciide_dma_start; 1613 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 1614 } 1615 break; 1616 1617 default: 1618 sc->sc_dma_ok = 0; 1619 printf(", (unsupported maptype 0x%x)", maptype); 1620 break; 1621 } 1622 } 1623 1624 void 1625 pciide_unmapreg_dma(struct pciide_softc *sc) 1626 { 1627 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, sc->sc_dma_iosz); 1628 } 1629 1630 int 1631 pciide_intr_flag(struct pciide_channel *cp) 1632 { 1633 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1634 int chan = cp->wdc_channel.channel; 1635 1636 if (cp->dma_in_progress) { 1637 int retry = 10; 1638 int status; 1639 1640 /* Check the status register */ 1641 for (retry = 10; retry > 0; retry--) { 1642 status = PCIIDE_DMACTL_READ(sc, chan); 1643 if (status & IDEDMA_CTL_INTR) { 1644 break; 1645 } 1646 DELAY(5); 1647 } 1648 1649 /* Not for us. */ 1650 if (retry == 0) 1651 return (0); 1652 1653 return (1); 1654 } 1655 1656 return (-1); 1657 } 1658 1659 int 1660 pciide_compat_intr(void *arg) 1661 { 1662 struct pciide_channel *cp = arg; 1663 1664 if (pciide_intr_flag(cp) == 0) 1665 return (0); 1666 1667 #ifdef DIAGNOSTIC 1668 /* should only be called for a compat channel */ 1669 if (cp->compat == 0) 1670 panic("pciide compat intr called for non-compat chan %p", cp); 1671 #endif 1672 return (wdcintr(&cp->wdc_channel)); 1673 } 1674 1675 int 1676 pciide_pci_intr(void *arg) 1677 { 1678 struct pciide_softc *sc = arg; 1679 struct pciide_channel *cp; 1680 struct channel_softc *wdc_cp; 1681 int i, rv, crv; 1682 1683 rv = 0; 1684 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 1685 cp = &sc->pciide_channels[i]; 1686 wdc_cp = &cp->wdc_channel; 1687 1688 /* If a compat channel skip. 
int
pciide_intr_flag(struct pciide_channel *cp)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int chan = cp->wdc_channel.channel;

	if (cp->dma_in_progress) {
		int retry = 10;
		int status;

		/* Check the status register */
		for (retry = 10; retry > 0; retry--) {
			status = PCIIDE_DMACTL_READ(sc, chan);
			if (status & IDEDMA_CTL_INTR) {
				break;
			}
			DELAY(5);
		}

		/* Not for us. */
		if (retry == 0)
			return (0);

		return (1);
	}

	return (-1);
}

int
pciide_compat_intr(void *arg)
{
	struct pciide_channel *cp = arg;

	if (pciide_intr_flag(cp) == 0)
		return (0);

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}

int
pciide_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel skip. */
		if (cp->compat)
			continue;

		if (pciide_intr_flag(cp) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

u_int8_t
pciide_dmacmd_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan)));
}

void
pciide_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan), val);
}

u_int8_t
pciide_dmactl_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan)));
}

void
pciide_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan), val);
}

void
pciide_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val)
{
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL(chan), val);
}

void
pciide_channel_dma_setup(struct pciide_channel *cp)
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}
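/*
 * Allocate the per-drive bus-master DMA resources: a physically contiguous
 * descriptor table of NIDEDMA_TABLES entries, a DMA map used to hand that
 * table's physical address to the controller, and an xfer DMA map used later
 * to load the actual data buffers.  Called once per drive; subsequent calls
 * return early if the table already exists.
 */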
int
pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive)
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return (0);

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
	    seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		printf("%s:%d: unable to create table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to load table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
	/* Create a xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
	    NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		printf("%s:%d: unable to create xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}
	return (0);
}

int
pciide_dma_init(void *v, int channel, int drive, void *databuf,
    size_t datalen, int flags)
{
	struct pciide_softc *sc = v;
	int error, seg;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];
#ifndef BUS_DMA_RAW
#define BUS_DMA_RAW 0
#endif

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW);
	if (error) {
		printf("%s:%d: unable to load xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, letoh32(dma_maps->dma_table[seg].byte_count),
		    letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Maps are ready. Start DMA function */
#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
		    dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif

	/* Clear status bits */
	PCIIDE_DMACTL_WRITE(sc, channel, PCIIDE_DMACTL_READ(sc, channel));
	/* Write table addr */
	PCIIDE_DMATBL_WRITE(sc, channel,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	PCIIDE_DMACMD_WRITE(sc, channel,
	    ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd);
	/* remember flags */
	dma_maps->dma_flags = flags;
	return (0);
}

void
pciide_dma_start(void *v, int channel, int drive)
{
	struct pciide_softc *sc = v;

	WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS);
	PCIIDE_DMACMD_WRITE(sc, channel, PCIIDE_DMACMD_READ(sc, channel) |
	    IDEDMA_CMD_START);

	sc->pciide_channels[channel].dma_in_progress = 1;
}
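/*
 * Stop a bus-master transfer and collect its status.  The return value is a
 * bit mask: WDC_DMAST_NOIRQ if the controller never raised an interrupt,
 * WDC_DMAST_ERR if a DMA error was reported, and WDC_DMAST_UNDER if the
 * engine is still active (a possible underrun, which can be a valid
 * condition for ATAPI).
 */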
1962 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1963 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 1964 1965 /* Clear status bits */ 1966 PCIIDE_DMACTL_WRITE(sc, channel, status); 1967 1968 if ((status & IDEDMA_CTL_ERR) != 0) { 1969 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 1970 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 1971 error |= WDC_DMAST_ERR; 1972 } 1973 1974 if ((status & IDEDMA_CTL_INTR) == 0) { 1975 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 1976 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 1977 drive, status); 1978 error |= WDC_DMAST_NOIRQ; 1979 } 1980 1981 if ((status & IDEDMA_CTL_ACT) != 0) { 1982 /* data underrun, may be a valid condition for ATAPI */ 1983 error |= WDC_DMAST_UNDER; 1984 } 1985 1986 done: 1987 sc->pciide_channels[channel].dma_in_progress = 0; 1988 return (error); 1989 } 1990 1991 void 1992 pciide_irqack(struct channel_softc *chp) 1993 { 1994 struct pciide_channel *cp = (struct pciide_channel *)chp; 1995 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1996 int chan = chp->channel; 1997 1998 /* clear status bits in IDE DMA registers */ 1999 PCIIDE_DMACTL_WRITE(sc, chan, PCIIDE_DMACTL_READ(sc, chan)); 2000 } 2001 2002 /* some common code used by several chip_map */ 2003 int 2004 pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface) 2005 { 2006 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2007 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2008 cp->name = PCIIDE_CHANNEL_NAME(channel); 2009 cp->wdc_channel.channel = channel; 2010 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2011 cp->wdc_channel.ch_queue = 2012 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2013 if (cp->wdc_channel.ch_queue == NULL) { 2014 printf("%s: %s " 2015 "cannot allocate memory for command queue", 2016 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2017 return (0); 2018 } 2019 cp->hw_ok = 1; 2020 2021 return (1); 2022 } 2023 2024 void 2025 pciide_chanfree(struct pciide_softc *sc, int channel) 2026 { 2027 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2028 if (cp->wdc_channel.ch_queue) 2029 free(cp->wdc_channel.ch_queue, M_DEVBUF); 2030 } 2031 2032 /* some common code used by several chip channel_map */ 2033 void 2034 pciide_mapchan(struct pci_attach_args *pa, struct pciide_channel *cp, 2035 pcireg_t interface, bus_size_t *cmdsizep, bus_size_t *ctlsizep, 2036 int (*pci_intr)(void *)) 2037 { 2038 struct channel_softc *wdc_cp = &cp->wdc_channel; 2039 2040 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 2041 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 2042 pci_intr); 2043 else 2044 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2045 wdc_cp->channel, cmdsizep, ctlsizep); 2046 if (cp->hw_ok == 0) 2047 return; 2048 wdc_cp->data32iot = wdc_cp->cmd_iot; 2049 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 2050 wdcattach(wdc_cp); 2051 } 2052 2053 void 2054 pciide_unmap_chan(struct pciide_softc *sc, struct pciide_channel *cp, int flags) 2055 { 2056 struct channel_softc *wdc_cp = &cp->wdc_channel; 2057 2058 wdcdetach(wdc_cp, flags); 2059 2060 if (cp->compat != 0) 2061 pciide_unmapregs_compat(sc, cp); 2062 else 2063 pciide_unmapregs_native(sc, cp); 2064 } 2065 2066 /* 2067 * Generic code to call to know if a channel can be disabled. 
Return 1 2068 * if channel can be disabled, 0 if not 2069 */ 2070 int 2071 pciide_chan_candisable(struct pciide_channel *cp) 2072 { 2073 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2074 struct channel_softc *wdc_cp = &cp->wdc_channel; 2075 2076 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 2077 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 2078 printf("%s: %s disabled (no drives)\n", 2079 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2080 cp->hw_ok = 0; 2081 return (1); 2082 } 2083 return (0); 2084 } 2085 2086 /* 2087 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 2088 * Set hw_ok=0 on failure 2089 */ 2090 void 2091 pciide_map_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2092 int compatchan, int interface) 2093 { 2094 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2095 struct channel_softc *wdc_cp = &cp->wdc_channel; 2096 2097 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2098 return; 2099 2100 cp->compat = 1; 2101 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 2102 pa, compatchan, pciide_compat_intr, cp); 2103 if (cp->ih == NULL) { 2104 printf("%s: no compatibility interrupt for use by %s\n", 2105 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2106 cp->hw_ok = 0; 2107 } 2108 } 2109 2110 /* 2111 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel. 2112 * Set hw_ok=0 on failure 2113 */ 2114 void 2115 pciide_unmap_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2116 int compatchan, int interface) 2117 { 2118 struct channel_softc *wdc_cp = &cp->wdc_channel; 2119 2120 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2121 return; 2122 2123 pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih); 2124 } 2125 2126 void 2127 pciide_print_channels(int nchannels, pcireg_t interface) 2128 { 2129 int i; 2130 2131 for (i = 0; i < nchannels; i++) { 2132 printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i), 2133 (interface & PCIIDE_INTERFACE_SETTABLE(i)) ? 2134 "configured" : "wired", 2135 (interface & PCIIDE_INTERFACE_PCI(i)) ? 
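/*
 * Illustrative sketch: decoding the IDE programming-interface byte that
 * the surrounding pciide_print_channels() reports on.  The bit positions
 * follow the "PCI IDE Controller Specification" cited at the top of this
 * file; the driver itself uses the PCIIDE_INTERFACE_* macros from
 * pciidereg.h, so the raw masks below are only sketch-local assumptions.
 */
static void
ex_decode_interface(u_int8_t progif)
{
	int chan;

	for (chan = 0; chan < 2; chan++) {
		int native = progif & (0x01 << (2 * chan));
		int settable = progif & (0x02 << (2 * chan));

		printf("channel %d: %s, %s\n", chan,
		    settable ? "configured" : "wired",
		    native ? "native-PCI" : "compatibility");
	}
	if (progif & 0x80)
		printf("bus-master DMA capable\n");
}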
"native-PCI" : 2136 "compatibility"); 2137 } 2138 2139 printf("\n"); 2140 } 2141 2142 void 2143 pciide_print_modes(struct pciide_channel *cp) 2144 { 2145 wdc_print_current_modes(&cp->wdc_channel); 2146 } 2147 2148 void 2149 default_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2150 { 2151 struct pciide_channel *cp; 2152 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2153 pcireg_t csr; 2154 int channel, drive; 2155 struct ata_drive_datas *drvp; 2156 u_int8_t idedma_ctl; 2157 bus_size_t cmdsize, ctlsize; 2158 char *failreason; 2159 2160 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2161 printf(": DMA"); 2162 if (sc->sc_pp == &default_product_desc && 2163 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 2164 PCIIDE_OPTIONS_DMA) == 0) { 2165 printf(" (unsupported)"); 2166 sc->sc_dma_ok = 0; 2167 } else { 2168 pciide_mapreg_dma(sc, pa); 2169 if (sc->sc_dma_ok != 0) 2170 printf(", (partial support)"); 2171 } 2172 } else { 2173 printf(": no DMA"); 2174 sc->sc_dma_ok = 0; 2175 } 2176 if (sc->sc_dma_ok) { 2177 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2178 sc->sc_wdcdev.irqack = pciide_irqack; 2179 } 2180 sc->sc_wdcdev.PIO_cap = 0; 2181 sc->sc_wdcdev.DMA_cap = 0; 2182 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2183 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2184 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 2185 2186 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2187 2188 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2189 cp = &sc->pciide_channels[channel]; 2190 if (pciide_chansetup(sc, channel, interface) == 0) 2191 continue; 2192 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 2193 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2194 &ctlsize, pciide_pci_intr); 2195 } else { 2196 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2197 channel, &cmdsize, &ctlsize); 2198 } 2199 if (cp->hw_ok == 0) 2200 continue; 2201 /* 2202 * Check to see if something appears to be there. 2203 */ 2204 failreason = NULL; 2205 pciide_map_compat_intr(pa, cp, channel, interface); 2206 if (cp->hw_ok == 0) 2207 continue; 2208 if (!wdcprobe(&cp->wdc_channel)) { 2209 failreason = "not responding; disabled or no drives?"; 2210 goto next; 2211 } 2212 /* 2213 * Now, make sure it's actually attributable to this PCI IDE 2214 * channel by trying to access the channel again while the 2215 * PCI IDE controller's I/O space is disabled. (If the 2216 * channel no longer appears to be there, it belongs to 2217 * this controller.) YUCK! 
2218 */ 2219 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 2220 PCI_COMMAND_STATUS_REG); 2221 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 2222 csr & ~PCI_COMMAND_IO_ENABLE); 2223 if (wdcprobe(&cp->wdc_channel)) 2224 failreason = "other hardware responding at addresses"; 2225 pci_conf_write(sc->sc_pc, sc->sc_tag, 2226 PCI_COMMAND_STATUS_REG, csr); 2227 next: 2228 if (failreason) { 2229 printf("%s: %s ignored (%s)\n", 2230 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2231 failreason); 2232 cp->hw_ok = 0; 2233 pciide_unmap_compat_intr(pa, cp, channel, interface); 2234 bus_space_unmap(cp->wdc_channel.cmd_iot, 2235 cp->wdc_channel.cmd_ioh, cmdsize); 2236 if (interface & PCIIDE_INTERFACE_PCI(channel)) 2237 bus_space_unmap(cp->wdc_channel.ctl_iot, 2238 cp->ctl_baseioh, ctlsize); 2239 else 2240 bus_space_unmap(cp->wdc_channel.ctl_iot, 2241 cp->wdc_channel.ctl_ioh, ctlsize); 2242 } 2243 if (cp->hw_ok) { 2244 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2245 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2246 wdcattach(&cp->wdc_channel); 2247 } 2248 } 2249 2250 if (sc->sc_dma_ok == 0) 2251 return; 2252 2253 /* Allocate DMA maps */ 2254 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2255 idedma_ctl = 0; 2256 cp = &sc->pciide_channels[channel]; 2257 for (drive = 0; drive < 2; drive++) { 2258 drvp = &cp->wdc_channel.ch_drive[drive]; 2259 /* If no drive, skip */ 2260 if ((drvp->drive_flags & DRIVE) == 0) 2261 continue; 2262 if ((drvp->drive_flags & DRIVE_DMA) == 0) 2263 continue; 2264 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 2265 /* Abort DMA setup */ 2266 printf("%s:%d:%d: cannot allocate DMA maps, " 2267 "using PIO transfers\n", 2268 sc->sc_wdcdev.sc_dev.dv_xname, 2269 channel, drive); 2270 drvp->drive_flags &= ~DRIVE_DMA; 2271 } 2272 printf("%s:%d:%d: using DMA data transfers\n", 2273 sc->sc_wdcdev.sc_dev.dv_xname, 2274 channel, drive); 2275 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2276 } 2277 if (idedma_ctl != 0) { 2278 /* Add software bits in status register */ 2279 PCIIDE_DMACTL_WRITE(sc, channel, idedma_ctl); 2280 } 2281 } 2282 } 2283 2284 void 2285 default_chip_unmap(struct pciide_softc *sc, int flags) 2286 { 2287 struct pciide_channel *cp; 2288 int channel; 2289 2290 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2291 cp = &sc->pciide_channels[channel]; 2292 pciide_unmap_chan(sc, cp, flags); 2293 pciide_chanfree(sc, channel); 2294 } 2295 2296 pciide_unmapreg_dma(sc); 2297 2298 if (sc->sc_cookie) 2299 free(sc->sc_cookie, M_DEVBUF); 2300 } 2301 2302 void 2303 sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2304 { 2305 struct pciide_channel *cp; 2306 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2307 int channel; 2308 bus_size_t cmdsize, ctlsize; 2309 2310 if (interface == 0) { 2311 WDCDEBUG_PRINT(("sata_chip_map interface == 0\n"), 2312 DEBUG_PROBE); 2313 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2314 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 2315 } 2316 2317 printf(": DMA"); 2318 pciide_mapreg_dma(sc, pa); 2319 printf("\n"); 2320 2321 if (sc->sc_dma_ok) { 2322 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2323 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2324 sc->sc_wdcdev.irqack = pciide_irqack; 2325 } 2326 sc->sc_wdcdev.PIO_cap = 4; 2327 sc->sc_wdcdev.DMA_cap = 2; 2328 sc->sc_wdcdev.UDMA_cap = 6; 2329 2330 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2331 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2332 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2333 
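/*
 * Illustrative sketch of the ownership test performed in
 * default_chip_map() above: probe the channel once, then probe again
 * with the controller's PCI I/O space decoding turned off.  Only if the
 * channel disappears does it really belong to this controller.  The
 * ex_probe callback stands in for wdcprobe(); everything else uses the
 * same PCI configuration-space helpers as the code above.
 */
static int
ex_channel_belongs_here(pci_chipset_tag_t pc, pcitag_t tag,
    int (*ex_probe)(void *), void *arg)
{
	pcireg_t csr;
	int with_io, without_io;

	with_io = ex_probe(arg);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG,
	    csr & ~PCI_COMMAND_IO_ENABLE);
	without_io = ex_probe(arg);
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	return (with_io && !without_io);
}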
WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2334 sc->sc_wdcdev.set_modes = sata_setup_channel; 2335 2336 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2337 cp = &sc->pciide_channels[channel]; 2338 if (pciide_chansetup(sc, channel, interface) == 0) 2339 continue; 2340 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2341 pciide_pci_intr); 2342 sata_setup_channel(&cp->wdc_channel); 2343 } 2344 } 2345 2346 void 2347 sata_setup_channel(struct channel_softc *chp) 2348 { 2349 struct ata_drive_datas *drvp; 2350 int drive; 2351 u_int32_t idedma_ctl; 2352 struct pciide_channel *cp = (struct pciide_channel *)chp; 2353 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2354 2355 /* setup DMA if needed */ 2356 pciide_channel_dma_setup(cp); 2357 2358 idedma_ctl = 0; 2359 2360 for (drive = 0; drive < 2; drive++) { 2361 drvp = &chp->ch_drive[drive]; 2362 /* If no drive, skip */ 2363 if ((drvp->drive_flags & DRIVE) == 0) 2364 continue; 2365 if (drvp->drive_flags & DRIVE_UDMA) { 2366 /* use Ultra/DMA */ 2367 drvp->drive_flags &= ~DRIVE_DMA; 2368 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2369 } else if (drvp->drive_flags & DRIVE_DMA) { 2370 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2371 } 2372 } 2373 2374 /* 2375 * Nothing to do to setup modes; it is meaningless in S-ATA 2376 * (but many S-ATA drives still want to get the SET_FEATURE 2377 * command). 2378 */ 2379 if (idedma_ctl != 0) { 2380 /* Add software bits in status register */ 2381 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 2382 } 2383 pciide_print_modes(cp); 2384 } 2385 2386 void 2387 piix_timing_debug(struct pciide_softc *sc) 2388 { 2389 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 2390 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 2391 DEBUG_PROBE); 2392 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE && 2393 sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_ISA) { 2394 WDCDEBUG_PRINT((", sidetim=0x%x", 2395 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 2396 DEBUG_PROBE); 2397 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2398 WDCDEBUG_PRINT((", udmareg 0x%x", 2399 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 2400 DEBUG_PROBE); 2401 } 2402 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2403 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2404 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2405 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2406 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2407 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2408 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2409 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2410 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2411 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2412 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2413 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2414 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2415 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2416 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2417 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2418 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 2419 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 2420 DEBUG_PROBE); 2421 } 2422 } 2423 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 2424 } 2425 2426 void 2427 piix_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2428 { 2429 struct pciide_channel *cp; 2430 int 
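/*
 * Illustrative sketch of the per-drive decision made in
 * sata_setup_channel() above: Ultra/DMA is preferred over multiword DMA,
 * and one software bit per DMA-capable drive is collected for the
 * bus-master status register.  The EX_* flags and the EX_CTL_DRV_DMA()
 * layout are simplified, sketch-local stand-ins for the driver's DRIVE,
 * DRIVE_UDMA and DRIVE_DMA flags and the IDEDMA_CTL_DRV_DMA() macro.
 */
#define EX_DRIVE	0x01
#define EX_UDMA		0x02
#define EX_DMA		0x04
#define EX_CTL_DRV_DMA(d)	(0x20 << (d))	/* assumed status-register layout */

static u_int8_t
ex_collect_dma_bits(u_int8_t flags[2])
{
	u_int8_t ctl = 0;
	int drive;

	for (drive = 0; drive < 2; drive++) {
		if ((flags[drive] & EX_DRIVE) == 0)
			continue;
		if (flags[drive] & EX_UDMA)
			flags[drive] &= ~EX_DMA;	/* UDMA wins over MW DMA */
		if (flags[drive] & (EX_UDMA | EX_DMA))
			ctl |= EX_CTL_DRV_DMA(drive);
	}
	return (ctl);
}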
channel; 2431 u_int32_t idetim; 2432 bus_size_t cmdsize, ctlsize; 2433 2434 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2435 2436 printf(": DMA"); 2437 pciide_mapreg_dma(sc, pa); 2438 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2439 WDC_CAPABILITY_MODE; 2440 if (sc->sc_dma_ok) { 2441 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2442 sc->sc_wdcdev.irqack = pciide_irqack; 2443 switch (sc->sc_pp->ide_product) { 2444 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2445 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2446 case PCI_PRODUCT_INTEL_82371AB_IDE: 2447 case PCI_PRODUCT_INTEL_82372FB_IDE: 2448 case PCI_PRODUCT_INTEL_82440MX_IDE: 2449 case PCI_PRODUCT_INTEL_82451NX: 2450 case PCI_PRODUCT_INTEL_82801AA_IDE: 2451 case PCI_PRODUCT_INTEL_82801AB_IDE: 2452 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2453 case PCI_PRODUCT_INTEL_82801BA_IDE: 2454 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2455 case PCI_PRODUCT_INTEL_82801CA_IDE: 2456 case PCI_PRODUCT_INTEL_82801DB_IDE: 2457 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2458 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2459 case PCI_PRODUCT_INTEL_82801EB_IDE: 2460 case PCI_PRODUCT_INTEL_82801FB_IDE: 2461 case PCI_PRODUCT_INTEL_82801GB_IDE: 2462 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2463 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2464 break; 2465 } 2466 } 2467 sc->sc_wdcdev.PIO_cap = 4; 2468 sc->sc_wdcdev.DMA_cap = 2; 2469 switch (sc->sc_pp->ide_product) { 2470 case PCI_PRODUCT_INTEL_82801AA_IDE: 2471 case PCI_PRODUCT_INTEL_82372FB_IDE: 2472 sc->sc_wdcdev.UDMA_cap = 4; 2473 break; 2474 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2475 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2476 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2477 case PCI_PRODUCT_INTEL_82801BA_IDE: 2478 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2479 case PCI_PRODUCT_INTEL_82801CA_IDE: 2480 case PCI_PRODUCT_INTEL_82801DB_IDE: 2481 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2482 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2483 case PCI_PRODUCT_INTEL_82801EB_IDE: 2484 case PCI_PRODUCT_INTEL_82801FB_IDE: 2485 case PCI_PRODUCT_INTEL_82801GB_IDE: 2486 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2487 sc->sc_wdcdev.UDMA_cap = 5; 2488 break; 2489 default: 2490 sc->sc_wdcdev.UDMA_cap = 2; 2491 break; 2492 } 2493 2494 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE || 2495 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_ISA) { 2496 sc->sc_wdcdev.set_modes = piix_setup_channel; 2497 } else { 2498 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 2499 } 2500 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2501 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2502 2503 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2504 2505 piix_timing_debug(sc); 2506 2507 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2508 cp = &sc->pciide_channels[channel]; 2509 2510 /* PIIX is compat-only */ 2511 if (pciide_chansetup(sc, channel, 0) == 0) 2512 continue; 2513 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2514 if ((PIIX_IDETIM_READ(idetim, channel) & 2515 PIIX_IDETIM_IDE) == 0) { 2516 printf("%s: %s ignored (disabled)\n", 2517 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2518 continue; 2519 } 2520 /* PIIX are compat-only pciide devices */ 2521 pciide_map_compat_intr(pa, cp, channel, 0); 2522 if (cp->hw_ok == 0) 2523 continue; 2524 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr); 2525 if (cp->hw_ok == 0) 2526 goto next; 2527 if (pciide_chan_candisable(cp)) { 2528 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 2529 channel); 2530 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 2531 
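/*
 * Illustrative sketch of the per-channel enable test used by
 * piix_chip_map() above: a channel is only attached when the IDE decode
 * bit of its half of the IDETIM register is set.  PIIX_IDETIM,
 * PIIX_IDETIM_READ and PIIX_IDETIM_IDE are the same macros used there;
 * only the ex_ wrapper is new.
 */
static int
ex_piix_channel_enabled(pci_chipset_tag_t pc, pcitag_t tag, int channel)
{
	pcireg_t idetim;

	idetim = pci_conf_read(pc, tag, PIIX_IDETIM);
	return ((PIIX_IDETIM_READ(idetim, channel) & PIIX_IDETIM_IDE) != 0);
}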
idetim); 2532 } 2533 if (cp->hw_ok == 0) 2534 goto next; 2535 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2536 next: 2537 if (cp->hw_ok == 0) 2538 pciide_unmap_compat_intr(pa, cp, channel, 0); 2539 } 2540 2541 piix_timing_debug(sc); 2542 } 2543 2544 void 2545 piixsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2546 { 2547 struct pciide_channel *cp; 2548 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2549 int channel; 2550 bus_size_t cmdsize, ctlsize; 2551 u_int8_t reg, ich = 0; 2552 2553 printf(": DMA"); 2554 pciide_mapreg_dma(sc, pa); 2555 2556 if (sc->sc_dma_ok) { 2557 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2558 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2559 sc->sc_wdcdev.irqack = pciide_irqack; 2560 sc->sc_wdcdev.DMA_cap = 2; 2561 sc->sc_wdcdev.UDMA_cap = 6; 2562 } 2563 sc->sc_wdcdev.PIO_cap = 4; 2564 2565 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2566 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2567 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2568 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2569 sc->sc_wdcdev.set_modes = sata_setup_channel; 2570 2571 switch(sc->sc_pp->ide_product) { 2572 case PCI_PRODUCT_INTEL_6300ESB_SATA: 2573 case PCI_PRODUCT_INTEL_6300ESB_SATA2: 2574 case PCI_PRODUCT_INTEL_82801EB_SATA: 2575 case PCI_PRODUCT_INTEL_82801ER_SATA: 2576 ich = 5; 2577 break; 2578 case PCI_PRODUCT_INTEL_82801FB_SATA: 2579 case PCI_PRODUCT_INTEL_82801FR_SATA: 2580 case PCI_PRODUCT_INTEL_82801FBM_SATA: 2581 ich = 6; 2582 break; 2583 default: 2584 ich = 7; 2585 break; 2586 } 2587 2588 /* 2589 * Put the SATA portion of controllers that don't operate in combined 2590 * mode into native PCI modes so the maximum number of devices can be 2591 * used. Intel calls this "enhanced mode" 2592 */ 2593 if (ich == 5) { 2594 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP); 2595 if ((reg & ICH5_SATA_MAP_COMBINED) == 0) { 2596 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2597 ICH5_SATA_PI); 2598 reg |= ICH5_SATA_PI_PRI_NATIVE | 2599 ICH5_SATA_PI_SEC_NATIVE; 2600 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2601 ICH5_SATA_PI, reg); 2602 interface |= PCIIDE_INTERFACE_PCI(0) | 2603 PCIIDE_INTERFACE_PCI(1); 2604 } 2605 } else { 2606 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP) & 2607 ICH6_SATA_MAP_CMB_MASK; 2608 if (reg != ICH6_SATA_MAP_CMB_PRI && 2609 reg != ICH6_SATA_MAP_CMB_SEC) { 2610 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2611 ICH5_SATA_PI); 2612 reg |= ICH5_SATA_PI_PRI_NATIVE | 2613 ICH5_SATA_PI_SEC_NATIVE; 2614 2615 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2616 ICH5_SATA_PI, reg); 2617 interface |= PCIIDE_INTERFACE_PCI(0) | 2618 PCIIDE_INTERFACE_PCI(1); 2619 2620 /* 2621 * Ask for SATA IDE Mode, we don't need to do this 2622 * for the combined mode case as combined mode is 2623 * only allowed in IDE Mode 2624 */ 2625 if (ich >= 7) { 2626 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 2627 ICH5_SATA_MAP) & ~ICH7_SATA_MAP_SMS_MASK; 2628 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2629 ICH5_SATA_MAP, reg); 2630 } 2631 } 2632 } 2633 2634 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2635 2636 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2637 cp = &sc->pciide_channels[channel]; 2638 if (pciide_chansetup(sc, channel, interface) == 0) 2639 continue; 2640 2641 pciide_map_compat_intr(pa, cp, channel, interface); 2642 if (cp->hw_ok == 0) 2643 continue; 2644 2645 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2646 pciide_pci_intr); 2647 if (cp->hw_ok != 0) 2648 
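/*
 * Illustrative sketch of the ICH5 "enhanced mode" switch performed in
 * piixsata_chip_map() above: when the controller is not in combined
 * mode, both SATA channels are put into native-PCI mode so that the
 * maximum number of devices can be used.  Register names are the ones
 * used above; the ex_ wrapper and its return convention are sketch-local,
 * and the ICH6/ICH7 variants add further map handling not shown here.
 */
static int
ex_ich5_force_native(pci_chipset_tag_t pc, pcitag_t tag)
{
	u_int8_t map, pi;

	map = pciide_pci_read(pc, tag, ICH5_SATA_MAP);
	if (map & ICH5_SATA_MAP_COMBINED)
		return (0);		/* combined mode: leave it alone */
	pi = pciide_pci_read(pc, tag, ICH5_SATA_PI);
	pi |= ICH5_SATA_PI_PRI_NATIVE | ICH5_SATA_PI_SEC_NATIVE;
	pciide_pci_write(pc, tag, ICH5_SATA_PI, pi);
	return (1);			/* caller marks both channels native */
}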
			sc->sc_wdcdev.set_modes(&cp->wdc_channel);
2649
2650 		if (cp->hw_ok == 0)
2651 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2652 	}
2653 }
2654
2655 void
2656 piix_setup_channel(struct channel_softc *chp)
2657 {
2658 	u_int8_t mode[2], drive;
2659 	u_int32_t oidetim, idetim, idedma_ctl;
2660 	struct pciide_channel *cp = (struct pciide_channel *)chp;
2661 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2662 	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
2663
2664 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
2665 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
2666 	idedma_ctl = 0;
2667
2668 	/* set up new idetim: enable IDE register decode */
2669 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
2670 	    chp->channel);
2671
2672 	/* setup DMA */
2673 	pciide_channel_dma_setup(cp);
2674
2675 	/*
2676 	 * Here we have to juggle the drive modes: the PIIX can't use
2677 	 * different timings for the master and slave drives,
2678 	 * so we need to find the best combination.
2679 	 */
2680
2681 	/* If both drives support DMA, take the lower mode */
2682 	if ((drvp[0].drive_flags & DRIVE_DMA) &&
2683 	    (drvp[1].drive_flags & DRIVE_DMA)) {
2684 		mode[0] = mode[1] =
2685 		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
2686 		drvp[0].DMA_mode = mode[0];
2687 		drvp[1].DMA_mode = mode[1];
2688 		goto ok;
2689 	}
2690 	/*
2691 	 * If only one drive supports DMA, use its mode, and
2692 	 * put the other one in PIO mode 0 if its mode is not compatible.
2693 	 */
2694 	if (drvp[0].drive_flags & DRIVE_DMA) {
2695 		mode[0] = drvp[0].DMA_mode;
2696 		mode[1] = drvp[1].PIO_mode;
2697 		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
2698 		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
2699 			mode[1] = drvp[1].PIO_mode = 0;
2700 		goto ok;
2701 	}
2702 	if (drvp[1].drive_flags & DRIVE_DMA) {
2703 		mode[1] = drvp[1].DMA_mode;
2704 		mode[0] = drvp[0].PIO_mode;
2705 		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
2706 		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
2707 			mode[0] = drvp[0].PIO_mode = 0;
2708 		goto ok;
2709 	}
2710 	/*
2711 	 * If neither drive uses DMA, take the lower PIO mode, unless
2712 	 * one of them is below PIO mode 2.
2713 	 */
2714 	if (drvp[0].PIO_mode < 2) {
2715 		mode[0] = drvp[0].PIO_mode = 0;
2716 		mode[1] = drvp[1].PIO_mode;
2717 	} else if (drvp[1].PIO_mode < 2) {
2718 		mode[1] = drvp[1].PIO_mode = 0;
2719 		mode[0] = drvp[0].PIO_mode;
2720 	} else {
2721 		mode[0] = mode[1] =
2722 		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
2723 		drvp[0].PIO_mode = mode[0];
2724 		drvp[1].PIO_mode = mode[1];
2725 	}
2726 ok:	/* The modes are now set up */
2727 	for (drive = 0; drive < 2; drive++) {
2728 		if (drvp[drive].drive_flags & DRIVE_DMA) {
2729 			idetim |= piix_setup_idetim_timings(
2730 			    mode[drive], 1, chp->channel);
2731 			goto end;
2732 		}
2733 	}
2734 	/* If we get here, neither drive uses DMA */
2735 	if (mode[0] >= 2)
2736 		idetim |= piix_setup_idetim_timings(
2737 		    mode[0], 0, chp->channel);
2738 	else
2739 		idetim |= piix_setup_idetim_timings(
2740 		    mode[1], 0, chp->channel);
2741 end:	/*
2742 	 * timing mode is now set up in the controller.
Enable 2743 * it per-drive 2744 */ 2745 for (drive = 0; drive < 2; drive++) { 2746 /* If no drive, skip */ 2747 if ((drvp[drive].drive_flags & DRIVE) == 0) 2748 continue; 2749 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 2750 if (drvp[drive].drive_flags & DRIVE_DMA) 2751 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2752 } 2753 if (idedma_ctl != 0) { 2754 /* Add software bits in status register */ 2755 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2756 IDEDMA_CTL(chp->channel), 2757 idedma_ctl); 2758 } 2759 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2760 pciide_print_modes(cp); 2761 } 2762 2763 void 2764 piix3_4_setup_channel(struct channel_softc *chp) 2765 { 2766 struct ata_drive_datas *drvp; 2767 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 2768 struct pciide_channel *cp = (struct pciide_channel *)chp; 2769 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2770 int drive; 2771 int channel = chp->channel; 2772 2773 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2774 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 2775 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 2776 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 2777 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 2778 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 2779 PIIX_SIDETIM_RTC_MASK(channel)); 2780 2781 idedma_ctl = 0; 2782 /* If channel disabled, no need to go further */ 2783 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 2784 return; 2785 /* set up new idetim: Enable IDE registers decode */ 2786 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 2787 2788 /* setup DMA if needed */ 2789 pciide_channel_dma_setup(cp); 2790 2791 for (drive = 0; drive < 2; drive++) { 2792 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 2793 PIIX_UDMATIM_SET(0x3, channel, drive)); 2794 drvp = &chp->ch_drive[drive]; 2795 /* If no drive, skip */ 2796 if ((drvp->drive_flags & DRIVE) == 0) 2797 continue; 2798 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2799 (drvp->drive_flags & DRIVE_UDMA) == 0)) 2800 goto pio; 2801 2802 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2803 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2804 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2805 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2806 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2807 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2808 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2809 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2810 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2811 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2812 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2813 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2814 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2815 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2816 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2817 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2818 ideconf |= PIIX_CONFIG_PINGPONG; 2819 } 2820 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2821 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2822 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2823 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 2824 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 2825 
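/*
 * Illustrative sketch: how the surrounding piix3_4_setup_channel()
 * builds the PIIX_UDMAREG value for one drive.  PIIX_UDMACTL_DRV_EN,
 * PIIX_UDMATIM_SET and piix4_sct_udma[] are the macros and table already
 * used in that function; only the ex_ helper itself is new.
 */
static u_int32_t
ex_piix_udma_bits(u_int32_t udmareg, int channel, int drive, int udma_mode)
{
	/* clear this drive's enable bit and timing field first */
	udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
	    PIIX_UDMATIM_SET(0x3, channel, drive));
	/* then enable UDMA and program the cycle time for the mode */
	udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
	udmareg |= PIIX_UDMATIM_SET(piix4_sct_udma[udma_mode], channel, drive);
	return (udmareg);
}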
sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2826 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2827 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2828 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2829 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2830 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2831 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2832 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) { 2833 /* setup Ultra/100 */ 2834 if (drvp->UDMA_mode > 2 && 2835 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2836 drvp->UDMA_mode = 2; 2837 if (drvp->UDMA_mode > 4) { 2838 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 2839 } else { 2840 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 2841 if (drvp->UDMA_mode > 2) { 2842 ideconf |= PIIX_CONFIG_UDMA66(channel, 2843 drive); 2844 } else { 2845 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 2846 drive); 2847 } 2848 } 2849 } 2850 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2851 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2852 /* setup Ultra/66 */ 2853 if (drvp->UDMA_mode > 2 && 2854 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2855 drvp->UDMA_mode = 2; 2856 if (drvp->UDMA_mode > 2) 2857 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 2858 else 2859 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 2860 } 2861 2862 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2863 (drvp->drive_flags & DRIVE_UDMA)) { 2864 /* use Ultra/DMA */ 2865 drvp->drive_flags &= ~DRIVE_DMA; 2866 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 2867 udmareg |= PIIX_UDMATIM_SET( 2868 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 2869 } else { 2870 /* use Multiword DMA */ 2871 drvp->drive_flags &= ~DRIVE_UDMA; 2872 if (drive == 0) { 2873 idetim |= piix_setup_idetim_timings( 2874 drvp->DMA_mode, 1, channel); 2875 } else { 2876 sidetim |= piix_setup_sidetim_timings( 2877 drvp->DMA_mode, 1, channel); 2878 idetim = PIIX_IDETIM_SET(idetim, 2879 PIIX_IDETIM_SITRE, channel); 2880 } 2881 } 2882 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2883 2884 pio: /* use PIO mode */ 2885 idetim |= piix_setup_idetim_drvs(drvp); 2886 if (drive == 0) { 2887 idetim |= piix_setup_idetim_timings( 2888 drvp->PIO_mode, 0, channel); 2889 } else { 2890 sidetim |= piix_setup_sidetim_timings( 2891 drvp->PIO_mode, 0, channel); 2892 idetim = PIIX_IDETIM_SET(idetim, 2893 PIIX_IDETIM_SITRE, channel); 2894 } 2895 } 2896 if (idedma_ctl != 0) { 2897 /* Add software bits in status register */ 2898 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2899 IDEDMA_CTL(channel), 2900 idedma_ctl); 2901 } 2902 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2903 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 2904 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 2905 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 2906 pciide_print_modes(cp); 2907 } 2908 2909 2910 /* setup ISP and RTC fields, based on mode */ 2911 u_int32_t 2912 piix_setup_idetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 2913 { 2914 2915 if (dma) 2916 return (PIIX_IDETIM_SET(0, 2917 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 2918 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 2919 channel)); 2920 else 2921 return (PIIX_IDETIM_SET(0, 2922 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 2923 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 2924 channel)); 2925 } 2926 2927 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 2928 u_int32_t 2929 piix_setup_idetim_drvs(struct 
ata_drive_datas *drvp) 2930 { 2931 u_int32_t ret = 0; 2932 struct channel_softc *chp = drvp->chnl_softc; 2933 u_int8_t channel = chp->channel; 2934 u_int8_t drive = drvp->drive; 2935 2936 /* 2937 * If drive is using UDMA, timings setups are independant 2938 * So just check DMA and PIO here. 2939 */ 2940 if (drvp->drive_flags & DRIVE_DMA) { 2941 /* if mode = DMA mode 0, use compatible timings */ 2942 if ((drvp->drive_flags & DRIVE_DMA) && 2943 drvp->DMA_mode == 0) { 2944 drvp->PIO_mode = 0; 2945 return (ret); 2946 } 2947 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 2948 /* 2949 * PIO and DMA timings are the same, use fast timings for PIO 2950 * too, else use compat timings. 2951 */ 2952 if ((piix_isp_pio[drvp->PIO_mode] != 2953 piix_isp_dma[drvp->DMA_mode]) || 2954 (piix_rtc_pio[drvp->PIO_mode] != 2955 piix_rtc_dma[drvp->DMA_mode])) 2956 drvp->PIO_mode = 0; 2957 /* if PIO mode <= 2, use compat timings for PIO */ 2958 if (drvp->PIO_mode <= 2) { 2959 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 2960 channel); 2961 return (ret); 2962 } 2963 } 2964 2965 /* 2966 * Now setup PIO modes. If mode < 2, use compat timings. 2967 * Else enable fast timings. Enable IORDY and prefetch/post 2968 * if PIO mode >= 3. 2969 */ 2970 2971 if (drvp->PIO_mode < 2) 2972 return (ret); 2973 2974 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 2975 if (drvp->PIO_mode >= 3) { 2976 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 2977 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 2978 } 2979 return (ret); 2980 } 2981 2982 /* setup values in SIDETIM registers, based on mode */ 2983 u_int32_t 2984 piix_setup_sidetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 2985 { 2986 if (dma) 2987 return (PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 2988 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel)); 2989 else 2990 return (PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 2991 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel)); 2992 } 2993 2994 void 2995 amd756_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2996 { 2997 struct pciide_channel *cp; 2998 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2999 int channel; 3000 pcireg_t chanenable; 3001 bus_size_t cmdsize, ctlsize; 3002 3003 printf(": DMA"); 3004 pciide_mapreg_dma(sc, pa); 3005 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3006 WDC_CAPABILITY_MODE; 3007 if (sc->sc_dma_ok) { 3008 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3009 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3010 sc->sc_wdcdev.irqack = pciide_irqack; 3011 } 3012 sc->sc_wdcdev.PIO_cap = 4; 3013 sc->sc_wdcdev.DMA_cap = 2; 3014 switch (sc->sc_pp->ide_product) { 3015 case PCI_PRODUCT_AMD_8111_IDE: 3016 sc->sc_wdcdev.UDMA_cap = 6; 3017 break; 3018 case PCI_PRODUCT_AMD_766_IDE: 3019 case PCI_PRODUCT_AMD_PBC768_IDE: 3020 sc->sc_wdcdev.UDMA_cap = 5; 3021 break; 3022 default: 3023 sc->sc_wdcdev.UDMA_cap = 4; 3024 break; 3025 } 3026 sc->sc_wdcdev.set_modes = amd756_setup_channel; 3027 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3028 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3029 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 3030 3031 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3032 3033 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3034 cp = &sc->pciide_channels[channel]; 3035 if (pciide_chansetup(sc, channel, interface) == 0) 3036 continue; 3037 3038 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 3039 
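/*
 * Illustrative sketch of the cable check done in amd756_setup_channel()
 * below: if the chip does not report an 80-conductor cable for this
 * drive, anything faster than UDMA mode 2 (Ultra/33) is downgraded.
 * AMD756_CABLE() is the macro used below; the ex_ helper is sketch-local.
 */
static int
ex_amd756_clamp_udma(pcireg_t chanenable, int channel, int drive,
    int udma_mode)
{
	if ((chanenable & AMD756_CABLE(channel, drive)) == 0 && udma_mode > 2)
		return (2);	/* no 80-wire cable: limit to Ultra/33 */
	return (udma_mode);
}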
printf("%s: %s ignored (disabled)\n", 3040 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3041 continue; 3042 } 3043 pciide_map_compat_intr(pa, cp, channel, interface); 3044 if (cp->hw_ok == 0) 3045 continue; 3046 3047 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3048 pciide_pci_intr); 3049 3050 if (pciide_chan_candisable(cp)) { 3051 chanenable &= ~AMD756_CHAN_EN(channel); 3052 } 3053 if (cp->hw_ok == 0) { 3054 pciide_unmap_compat_intr(pa, cp, channel, interface); 3055 continue; 3056 } 3057 3058 amd756_setup_channel(&cp->wdc_channel); 3059 } 3060 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 3061 chanenable); 3062 return; 3063 } 3064 3065 void 3066 amd756_setup_channel(struct channel_softc *chp) 3067 { 3068 u_int32_t udmatim_reg, datatim_reg; 3069 u_int8_t idedma_ctl; 3070 int mode, drive; 3071 struct ata_drive_datas *drvp; 3072 struct pciide_channel *cp = (struct pciide_channel *)chp; 3073 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3074 pcireg_t chanenable; 3075 #ifndef PCIIDE_AMD756_ENABLEDMA 3076 int product = sc->sc_pp->ide_product; 3077 int rev = sc->sc_rev; 3078 #endif 3079 3080 idedma_ctl = 0; 3081 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 3082 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 3083 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 3084 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 3085 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 3086 AMD756_CHANSTATUS_EN); 3087 3088 /* setup DMA if needed */ 3089 pciide_channel_dma_setup(cp); 3090 3091 for (drive = 0; drive < 2; drive++) { 3092 drvp = &chp->ch_drive[drive]; 3093 /* If no drive, skip */ 3094 if ((drvp->drive_flags & DRIVE) == 0) 3095 continue; 3096 /* add timing values, setup DMA if needed */ 3097 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3098 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3099 mode = drvp->PIO_mode; 3100 goto pio; 3101 } 3102 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3103 (drvp->drive_flags & DRIVE_UDMA)) { 3104 /* use Ultra/DMA */ 3105 drvp->drive_flags &= ~DRIVE_DMA; 3106 3107 /* Check cable */ 3108 if ((chanenable & AMD756_CABLE(chp->channel, 3109 drive)) == 0 && drvp->UDMA_mode > 2) { 3110 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 3111 "cable not detected\n", drvp->drive_name, 3112 sc->sc_wdcdev.sc_dev.dv_xname, 3113 chp->channel, drive), DEBUG_PROBE); 3114 drvp->UDMA_mode = 2; 3115 } 3116 3117 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 3118 AMD756_UDMA_EN_MTH(chp->channel, drive) | 3119 AMD756_UDMA_TIME(chp->channel, drive, 3120 amd756_udma_tim[drvp->UDMA_mode]); 3121 /* can use PIO timings, MW DMA unused */ 3122 mode = drvp->PIO_mode; 3123 } else { 3124 /* use Multiword DMA, but only if revision is OK */ 3125 drvp->drive_flags &= ~DRIVE_UDMA; 3126 #ifndef PCIIDE_AMD756_ENABLEDMA 3127 /* 3128 * The workaround doesn't seem to be necessary 3129 * with all drives, so it can be disabled by 3130 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 3131 * triggered. 
3132 */ 3133 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 3134 printf("%s:%d:%d: multi-word DMA disabled due " 3135 "to chip revision\n", 3136 sc->sc_wdcdev.sc_dev.dv_xname, 3137 chp->channel, drive); 3138 mode = drvp->PIO_mode; 3139 drvp->drive_flags &= ~DRIVE_DMA; 3140 goto pio; 3141 } 3142 #endif 3143 /* mode = min(pio, dma+2) */ 3144 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3145 mode = drvp->PIO_mode; 3146 else 3147 mode = drvp->DMA_mode + 2; 3148 } 3149 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3150 3151 pio: /* setup PIO mode */ 3152 if (mode <= 2) { 3153 drvp->DMA_mode = 0; 3154 drvp->PIO_mode = 0; 3155 mode = 0; 3156 } else { 3157 drvp->PIO_mode = mode; 3158 drvp->DMA_mode = mode - 2; 3159 } 3160 datatim_reg |= 3161 AMD756_DATATIM_PULSE(chp->channel, drive, 3162 amd756_pio_set[mode]) | 3163 AMD756_DATATIM_RECOV(chp->channel, drive, 3164 amd756_pio_rec[mode]); 3165 } 3166 if (idedma_ctl != 0) { 3167 /* Add software bits in status register */ 3168 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3169 IDEDMA_CTL(chp->channel), 3170 idedma_ctl); 3171 } 3172 pciide_print_modes(cp); 3173 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 3174 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 3175 } 3176 3177 void 3178 apollo_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3179 { 3180 struct pciide_channel *cp; 3181 pcireg_t interface; 3182 int channel; 3183 u_int32_t ideconf; 3184 bus_size_t cmdsize, ctlsize; 3185 pcitag_t tag; 3186 pcireg_t id, class; 3187 3188 /* 3189 * Fake interface since VT6410 is claimed to be a ``RAID'' device. 3190 */ 3191 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3192 interface = PCI_INTERFACE(pa->pa_class); 3193 } else { 3194 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3195 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3196 } 3197 3198 if ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT6410) || 3199 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_CX700_IDE) || 3200 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VX700_IDE) || 3201 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VX855_IDE)) { 3202 printf(": ATA133"); 3203 sc->sc_wdcdev.UDMA_cap = 6; 3204 } else { 3205 /* 3206 * Determine the DMA capabilities by looking at the 3207 * ISA bridge. 3208 */ 3209 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 3210 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3211 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3212 3213 /* 3214 * XXX On the VT8237, the ISA bridge is on a different 3215 * device. 
3216 */ 3217 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE && 3218 pa->pa_device == 15) { 3219 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 17, 0); 3220 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3221 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3222 } 3223 3224 switch (PCI_PRODUCT(id)) { 3225 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 3226 if (PCI_REVISION(class) >= 0x02) { 3227 printf(": ATA33"); 3228 sc->sc_wdcdev.UDMA_cap = 2; 3229 } else { 3230 printf(": DMA"); 3231 sc->sc_wdcdev.UDMA_cap = 0; 3232 } 3233 break; 3234 case PCI_PRODUCT_VIATECH_VT82C596A: 3235 if (PCI_REVISION(class) >= 0x12) { 3236 printf(": ATA66"); 3237 sc->sc_wdcdev.UDMA_cap = 4; 3238 } else { 3239 printf(": ATA33"); 3240 sc->sc_wdcdev.UDMA_cap = 2; 3241 } 3242 break; 3243 3244 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 3245 if (PCI_REVISION(class) >= 0x40) { 3246 printf(": ATA100"); 3247 sc->sc_wdcdev.UDMA_cap = 5; 3248 } else { 3249 printf(": ATA66"); 3250 sc->sc_wdcdev.UDMA_cap = 4; 3251 } 3252 break; 3253 case PCI_PRODUCT_VIATECH_VT8231_ISA: 3254 case PCI_PRODUCT_VIATECH_VT8233_ISA: 3255 printf(": ATA100"); 3256 sc->sc_wdcdev.UDMA_cap = 5; 3257 break; 3258 case PCI_PRODUCT_VIATECH_VT8233A_ISA: 3259 case PCI_PRODUCT_VIATECH_VT8235_ISA: 3260 case PCI_PRODUCT_VIATECH_VT8237_ISA: 3261 printf(": ATA133"); 3262 sc->sc_wdcdev.UDMA_cap = 6; 3263 break; 3264 default: 3265 printf(": DMA"); 3266 sc->sc_wdcdev.UDMA_cap = 0; 3267 break; 3268 } 3269 } 3270 3271 pciide_mapreg_dma(sc, pa); 3272 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3273 WDC_CAPABILITY_MODE; 3274 if (sc->sc_dma_ok) { 3275 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3276 sc->sc_wdcdev.irqack = pciide_irqack; 3277 if (sc->sc_wdcdev.UDMA_cap > 0) 3278 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3279 } 3280 sc->sc_wdcdev.PIO_cap = 4; 3281 sc->sc_wdcdev.DMA_cap = 2; 3282 sc->sc_wdcdev.set_modes = apollo_setup_channel; 3283 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3284 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3285 3286 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3287 3288 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 3289 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3290 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 3291 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 3292 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3293 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 3294 DEBUG_PROBE); 3295 3296 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3297 cp = &sc->pciide_channels[channel]; 3298 if (pciide_chansetup(sc, channel, interface) == 0) 3299 continue; 3300 3301 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 3302 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 3303 printf("%s: %s ignored (disabled)\n", 3304 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3305 continue; 3306 } 3307 pciide_map_compat_intr(pa, cp, channel, interface); 3308 if (cp->hw_ok == 0) 3309 continue; 3310 3311 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3312 pciide_pci_intr); 3313 if (cp->hw_ok == 0) { 3314 goto next; 3315 } 3316 if (pciide_chan_candisable(cp)) { 3317 ideconf &= ~APO_IDECONF_EN(channel); 3318 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 3319 ideconf); 3320 } 3321 3322 if (cp->hw_ok == 0) 3323 goto next; 3324 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 3325 next: 3326 if (cp->hw_ok == 0) 3327 pciide_unmap_compat_intr(pa, cp, channel, interface); 3328 } 3329 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, 
APO_UDMA=0x%x\n", 3330 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3331 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 3332 } 3333 3334 void 3335 apollo_setup_channel(struct channel_softc *chp) 3336 { 3337 u_int32_t udmatim_reg, datatim_reg; 3338 u_int8_t idedma_ctl; 3339 int mode, drive; 3340 struct ata_drive_datas *drvp; 3341 struct pciide_channel *cp = (struct pciide_channel *)chp; 3342 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3343 3344 idedma_ctl = 0; 3345 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 3346 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 3347 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 3348 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 3349 3350 /* setup DMA if needed */ 3351 pciide_channel_dma_setup(cp); 3352 3353 /* 3354 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 3355 * downgrade to Ultra/33 if needed 3356 */ 3357 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3358 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 3359 /* both drives UDMA */ 3360 if (chp->ch_drive[0].UDMA_mode > 2 && 3361 chp->ch_drive[1].UDMA_mode <= 2) { 3362 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 3363 chp->ch_drive[0].UDMA_mode = 2; 3364 } else if (chp->ch_drive[1].UDMA_mode > 2 && 3365 chp->ch_drive[0].UDMA_mode <= 2) { 3366 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 3367 chp->ch_drive[1].UDMA_mode = 2; 3368 } 3369 } 3370 3371 for (drive = 0; drive < 2; drive++) { 3372 drvp = &chp->ch_drive[drive]; 3373 /* If no drive, skip */ 3374 if ((drvp->drive_flags & DRIVE) == 0) 3375 continue; 3376 /* add timing values, setup DMA if needed */ 3377 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3378 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3379 mode = drvp->PIO_mode; 3380 goto pio; 3381 } 3382 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3383 (drvp->drive_flags & DRIVE_UDMA)) { 3384 /* use Ultra/DMA */ 3385 drvp->drive_flags &= ~DRIVE_DMA; 3386 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 3387 APO_UDMA_EN_MTH(chp->channel, drive); 3388 if (sc->sc_wdcdev.UDMA_cap == 6) { 3389 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3390 drive, apollo_udma133_tim[drvp->UDMA_mode]); 3391 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 3392 /* 686b */ 3393 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3394 drive, apollo_udma100_tim[drvp->UDMA_mode]); 3395 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 3396 /* 596b or 686a */ 3397 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 3398 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3399 drive, apollo_udma66_tim[drvp->UDMA_mode]); 3400 } else { 3401 /* 596a or 586b */ 3402 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3403 drive, apollo_udma33_tim[drvp->UDMA_mode]); 3404 } 3405 /* can use PIO timings, MW DMA unused */ 3406 mode = drvp->PIO_mode; 3407 } else { 3408 /* use Multiword DMA */ 3409 drvp->drive_flags &= ~DRIVE_UDMA; 3410 /* mode = min(pio, dma+2) */ 3411 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3412 mode = drvp->PIO_mode; 3413 else 3414 mode = drvp->DMA_mode + 2; 3415 } 3416 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3417 3418 pio: /* setup PIO mode */ 3419 if (mode <= 2) { 3420 drvp->DMA_mode = 0; 3421 drvp->PIO_mode = 0; 3422 mode = 0; 3423 } else { 3424 drvp->PIO_mode = mode; 3425 drvp->DMA_mode = mode - 2; 3426 } 3427 datatim_reg |= 3428 APO_DATATIM_PULSE(chp->channel, drive, 3429 apollo_pio_set[mode]) | 3430 APO_DATATIM_RECOV(chp->channel, drive, 3431 apollo_pio_rec[mode]); 3432 } 3433 if (idedma_ctl != 0) { 3434 /* Add software bits in status register */ 3435 
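/*
 * Illustrative sketch of the rule applied in apollo_setup_channel()
 * above: Ultra/33 and Ultra/66 timings cannot be mixed on one channel,
 * so when both drives use UDMA and only one of them is above mode 2,
 * that drive is downgraded to mode 2.  The ex_ helper is sketch-local.
 */
static void
ex_apollo_match_udma(int udma_mode[2])
{
	if (udma_mode[0] > 2 && udma_mode[1] <= 2)
		udma_mode[0] = 2;	/* drive 0 Ultra/66, drive 1 Ultra/33 */
	else if (udma_mode[1] > 2 && udma_mode[0] <= 2)
		udma_mode[1] = 2;	/* drive 1 Ultra/66, drive 0 Ultra/33 */
}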
bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3436 IDEDMA_CTL(chp->channel), 3437 idedma_ctl); 3438 } 3439 pciide_print_modes(cp); 3440 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 3441 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 3442 } 3443 3444 void 3445 cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3446 int channel) 3447 { 3448 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3449 bus_size_t cmdsize, ctlsize; 3450 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 3451 pcireg_t interface; 3452 int one_channel; 3453 3454 /* 3455 * The 0648/0649 can be told to identify as a RAID controller. 3456 * In this case, we have to fake interface 3457 */ 3458 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3459 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3460 PCIIDE_INTERFACE_SETTABLE(1); 3461 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3462 CMD_CONF_DSA1) 3463 interface |= PCIIDE_INTERFACE_PCI(0) | 3464 PCIIDE_INTERFACE_PCI(1); 3465 } else { 3466 interface = PCI_INTERFACE(pa->pa_class); 3467 } 3468 3469 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3470 cp->name = PCIIDE_CHANNEL_NAME(channel); 3471 cp->wdc_channel.channel = channel; 3472 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3473 3474 /* 3475 * Older CMD64X doesn't have independant channels 3476 */ 3477 switch (sc->sc_pp->ide_product) { 3478 case PCI_PRODUCT_CMDTECH_649: 3479 one_channel = 0; 3480 break; 3481 default: 3482 one_channel = 1; 3483 break; 3484 } 3485 3486 if (channel > 0 && one_channel) { 3487 cp->wdc_channel.ch_queue = 3488 sc->pciide_channels[0].wdc_channel.ch_queue; 3489 } else { 3490 cp->wdc_channel.ch_queue = 3491 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 3492 } 3493 if (cp->wdc_channel.ch_queue == NULL) { 3494 printf( 3495 "%s: %s cannot allocate memory for command queue", 3496 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3497 return; 3498 } 3499 3500 /* 3501 * with a CMD PCI64x, if we get here, the first channel is enabled: 3502 * there's no way to disable the first channel without disabling 3503 * the whole device 3504 */ 3505 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 3506 printf("%s: %s ignored (disabled)\n", 3507 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3508 return; 3509 } 3510 cp->hw_ok = 1; 3511 pciide_map_compat_intr(pa, cp, channel, interface); 3512 if (cp->hw_ok == 0) 3513 return; 3514 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 3515 if (cp->hw_ok == 0) { 3516 pciide_unmap_compat_intr(pa, cp, channel, interface); 3517 return; 3518 } 3519 if (pciide_chan_candisable(cp)) { 3520 if (channel == 1) { 3521 ctrl &= ~CMD_CTRL_2PORT; 3522 pciide_pci_write(pa->pa_pc, pa->pa_tag, 3523 CMD_CTRL, ctrl); 3524 pciide_unmap_compat_intr(pa, cp, channel, interface); 3525 } 3526 } 3527 } 3528 3529 int 3530 cmd_pci_intr(void *arg) 3531 { 3532 struct pciide_softc *sc = arg; 3533 struct pciide_channel *cp; 3534 struct channel_softc *wdc_cp; 3535 int i, rv, crv; 3536 u_int32_t priirq, secirq; 3537 3538 rv = 0; 3539 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3540 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3541 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3542 cp = &sc->pciide_channels[i]; 3543 wdc_cp = &cp->wdc_channel; 3544 /* If a compat channel skip. 
*/ 3545 if (cp->compat) 3546 continue; 3547 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 3548 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 3549 crv = wdcintr(wdc_cp); 3550 if (crv == 0) { 3551 #if 0 3552 printf("%s:%d: bogus intr\n", 3553 sc->sc_wdcdev.sc_dev.dv_xname, i); 3554 #endif 3555 } else 3556 rv = 1; 3557 } 3558 } 3559 return (rv); 3560 } 3561 3562 void 3563 cmd_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3564 { 3565 int channel; 3566 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3567 3568 printf(": no DMA"); 3569 sc->sc_dma_ok = 0; 3570 3571 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3572 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3573 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3574 3575 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3576 3577 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3578 cmd_channel_map(pa, sc, channel); 3579 } 3580 } 3581 3582 void 3583 cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3584 { 3585 struct pciide_channel *cp; 3586 int channel; 3587 int rev = sc->sc_rev; 3588 pcireg_t interface; 3589 3590 /* 3591 * The 0648/0649 can be told to identify as a RAID controller. 3592 * In this case, we have to fake interface 3593 */ 3594 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3595 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3596 PCIIDE_INTERFACE_SETTABLE(1); 3597 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3598 CMD_CONF_DSA1) 3599 interface |= PCIIDE_INTERFACE_PCI(0) | 3600 PCIIDE_INTERFACE_PCI(1); 3601 } else { 3602 interface = PCI_INTERFACE(pa->pa_class); 3603 } 3604 3605 printf(": DMA"); 3606 pciide_mapreg_dma(sc, pa); 3607 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3608 WDC_CAPABILITY_MODE; 3609 if (sc->sc_dma_ok) { 3610 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3611 switch (sc->sc_pp->ide_product) { 3612 case PCI_PRODUCT_CMDTECH_649: 3613 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3614 sc->sc_wdcdev.UDMA_cap = 5; 3615 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3616 break; 3617 case PCI_PRODUCT_CMDTECH_648: 3618 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3619 sc->sc_wdcdev.UDMA_cap = 4; 3620 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3621 break; 3622 case PCI_PRODUCT_CMDTECH_646: 3623 if (rev >= CMD0646U2_REV) { 3624 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3625 sc->sc_wdcdev.UDMA_cap = 2; 3626 } else if (rev >= CMD0646U_REV) { 3627 /* 3628 * Linux's driver claims that the 646U is broken 3629 * with UDMA. 
Only enable it if we know what we're 3630 * doing 3631 */ 3632 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 3633 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3634 sc->sc_wdcdev.UDMA_cap = 2; 3635 #endif 3636 /* explicitly disable UDMA */ 3637 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3638 CMD_UDMATIM(0), 0); 3639 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3640 CMD_UDMATIM(1), 0); 3641 } 3642 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3643 break; 3644 default: 3645 sc->sc_wdcdev.irqack = pciide_irqack; 3646 } 3647 } 3648 3649 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3650 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3651 sc->sc_wdcdev.PIO_cap = 4; 3652 sc->sc_wdcdev.DMA_cap = 2; 3653 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 3654 3655 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3656 3657 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 3658 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3659 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3660 DEBUG_PROBE); 3661 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3662 cp = &sc->pciide_channels[channel]; 3663 cmd_channel_map(pa, sc, channel); 3664 if (cp->hw_ok == 0) 3665 continue; 3666 cmd0643_9_setup_channel(&cp->wdc_channel); 3667 } 3668 /* 3669 * note - this also makes sure we clear the irq disable and reset 3670 * bits 3671 */ 3672 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 3673 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 3674 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3675 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3676 DEBUG_PROBE); 3677 } 3678 3679 void 3680 cmd0643_9_setup_channel(struct channel_softc *chp) 3681 { 3682 struct ata_drive_datas *drvp; 3683 u_int8_t tim; 3684 u_int32_t idedma_ctl, udma_reg; 3685 int drive; 3686 struct pciide_channel *cp = (struct pciide_channel *)chp; 3687 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3688 3689 idedma_ctl = 0; 3690 /* setup DMA if needed */ 3691 pciide_channel_dma_setup(cp); 3692 3693 for (drive = 0; drive < 2; drive++) { 3694 drvp = &chp->ch_drive[drive]; 3695 /* If no drive, skip */ 3696 if ((drvp->drive_flags & DRIVE) == 0) 3697 continue; 3698 /* add timing values, setup DMA if needed */ 3699 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 3700 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 3701 if (drvp->drive_flags & DRIVE_UDMA) { 3702 /* UltraDMA on a 646U2, 0648 or 0649 */ 3703 drvp->drive_flags &= ~DRIVE_DMA; 3704 udma_reg = pciide_pci_read(sc->sc_pc, 3705 sc->sc_tag, CMD_UDMATIM(chp->channel)); 3706 if (drvp->UDMA_mode > 2 && 3707 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3708 CMD_BICSR) & 3709 CMD_BICSR_80(chp->channel)) == 0) { 3710 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 3711 "80-wire cable not detected\n", 3712 drvp->drive_name, 3713 sc->sc_wdcdev.sc_dev.dv_xname, 3714 chp->channel, drive), DEBUG_PROBE); 3715 drvp->UDMA_mode = 2; 3716 } 3717 if (drvp->UDMA_mode > 2) 3718 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 3719 else if (sc->sc_wdcdev.UDMA_cap > 2) 3720 udma_reg |= CMD_UDMATIM_UDMA33(drive); 3721 udma_reg |= CMD_UDMATIM_UDMA(drive); 3722 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 3723 CMD_UDMATIM_TIM_OFF(drive)); 3724 udma_reg |= 3725 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 3726 CMD_UDMATIM_TIM_OFF(drive)); 3727 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3728 CMD_UDMATIM(chp->channel), udma_reg); 3729 } else { 3730 /* 3731 * use Multiword DMA. 
3732 * Timings will be used for both PIO and DMA, 3733 * so adjust DMA mode if needed 3734 * if we have a 0646U2/8/9, turn off UDMA 3735 */ 3736 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 3737 udma_reg = pciide_pci_read(sc->sc_pc, 3738 sc->sc_tag, 3739 CMD_UDMATIM(chp->channel)); 3740 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 3741 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3742 CMD_UDMATIM(chp->channel), 3743 udma_reg); 3744 } 3745 if (drvp->PIO_mode >= 3 && 3746 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3747 drvp->DMA_mode = drvp->PIO_mode - 2; 3748 } 3749 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 3750 } 3751 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3752 } 3753 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3754 CMD_DATA_TIM(chp->channel, drive), tim); 3755 } 3756 if (idedma_ctl != 0) { 3757 /* Add software bits in status register */ 3758 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3759 IDEDMA_CTL(chp->channel), 3760 idedma_ctl); 3761 } 3762 pciide_print_modes(cp); 3763 #ifdef __sparc64__ 3764 /* 3765 * The Ultra 5 has a tendency to hang during reboot. This is due 3766 * to the PCI0646U asserting a PCI interrupt line when the chip 3767 * registers claim that it is not. Performing a reset at this 3768 * point appears to eliminate the symptoms. It is likely the 3769 * real cause is still lurking somewhere in the code. 3770 */ 3771 wdcreset(chp, SILENT); 3772 #endif /* __sparc64__ */ 3773 } 3774 3775 void 3776 cmd646_9_irqack(struct channel_softc *chp) 3777 { 3778 u_int32_t priirq, secirq; 3779 struct pciide_channel *cp = (struct pciide_channel *)chp; 3780 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3781 3782 if (chp->channel == 0) { 3783 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3784 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 3785 } else { 3786 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3787 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 3788 } 3789 pciide_irqack(chp); 3790 } 3791 3792 void 3793 cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3794 { 3795 struct pciide_channel *cp; 3796 int channel; 3797 3798 printf("\n%s: bus-master DMA support present", 3799 sc->sc_wdcdev.sc_dev.dv_xname); 3800 pciide_mapreg_dma(sc, pa); 3801 printf("\n"); 3802 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3803 WDC_CAPABILITY_MODE; 3804 if (sc->sc_dma_ok) { 3805 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3806 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3807 sc->sc_wdcdev.UDMA_cap = 6; 3808 sc->sc_wdcdev.irqack = pciide_irqack; 3809 } 3810 3811 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3812 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3813 sc->sc_wdcdev.PIO_cap = 4; 3814 sc->sc_wdcdev.DMA_cap = 2; 3815 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 3816 3817 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 3818 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 3819 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 3820 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 3821 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3822 cp = &sc->pciide_channels[channel]; 3823 cmd680_channel_map(pa, sc, channel); 3824 if (cp->hw_ok == 0) 3825 continue; 3826 cmd680_setup_channel(&cp->wdc_channel); 3827 } 3828 } 3829 3830 void 3831 cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3832 int channel) 3833 { 3834 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3835 bus_size_t cmdsize, ctlsize; 3836 int 
interface, i, reg; 3837 static const u_int8_t init_val[] = 3838 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 3839 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 3840 3841 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3842 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3843 PCIIDE_INTERFACE_SETTABLE(1); 3844 interface |= PCIIDE_INTERFACE_PCI(0) | 3845 PCIIDE_INTERFACE_PCI(1); 3846 } else { 3847 interface = PCI_INTERFACE(pa->pa_class); 3848 } 3849 3850 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3851 cp->name = PCIIDE_CHANNEL_NAME(channel); 3852 cp->wdc_channel.channel = channel; 3853 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3854 3855 cp->wdc_channel.ch_queue = 3856 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 3857 if (cp->wdc_channel.ch_queue == NULL) { 3858 printf("%s %s: " 3859 "can't allocate memory for command queue", 3860 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3861 return; 3862 } 3863 3864 /* XXX */ 3865 reg = 0xa2 + channel * 16; 3866 for (i = 0; i < sizeof(init_val); i++) 3867 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 3868 3869 printf("%s: %s %s to %s mode\n", 3870 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 3871 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 3872 "configured" : "wired", 3873 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 3874 "native-PCI" : "compatibility"); 3875 3876 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 3877 if (cp->hw_ok == 0) 3878 return; 3879 pciide_map_compat_intr(pa, cp, channel, interface); 3880 } 3881 3882 void 3883 cmd680_setup_channel(struct channel_softc *chp) 3884 { 3885 struct ata_drive_datas *drvp; 3886 u_int8_t mode, off, scsc; 3887 u_int16_t val; 3888 u_int32_t idedma_ctl; 3889 int drive; 3890 struct pciide_channel *cp = (struct pciide_channel *)chp; 3891 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3892 pci_chipset_tag_t pc = sc->sc_pc; 3893 pcitag_t pa = sc->sc_tag; 3894 static const u_int8_t udma2_tbl[] = 3895 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 3896 static const u_int8_t udma_tbl[] = 3897 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 3898 static const u_int16_t dma_tbl[] = 3899 { 0x2208, 0x10c2, 0x10c1 }; 3900 static const u_int16_t pio_tbl[] = 3901 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 3902 3903 idedma_ctl = 0; 3904 pciide_channel_dma_setup(cp); 3905 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 3906 3907 for (drive = 0; drive < 2; drive++) { 3908 drvp = &chp->ch_drive[drive]; 3909 /* If no drive, skip */ 3910 if ((drvp->drive_flags & DRIVE) == 0) 3911 continue; 3912 mode &= ~(0x03 << (drive * 4)); 3913 if (drvp->drive_flags & DRIVE_UDMA) { 3914 drvp->drive_flags &= ~DRIVE_DMA; 3915 off = 0xa0 + chp->channel * 16; 3916 if (drvp->UDMA_mode > 2 && 3917 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 3918 drvp->UDMA_mode = 2; 3919 scsc = pciide_pci_read(pc, pa, 0x8a); 3920 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 3921 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 3922 scsc = pciide_pci_read(pc, pa, 0x8a); 3923 if ((scsc & 0x30) == 0) 3924 drvp->UDMA_mode = 5; 3925 } 3926 mode |= 0x03 << (drive * 4); 3927 off = 0xac + chp->channel * 16 + drive * 2; 3928 val = pciide_pci_read(pc, pa, off) & ~0x3f; 3929 if (scsc & 0x30) 3930 val |= udma2_tbl[drvp->UDMA_mode]; 3931 else 3932 val |= udma_tbl[drvp->UDMA_mode]; 3933 pciide_pci_write(pc, pa, off, val); 3934 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3935 } else if (drvp->drive_flags & DRIVE_DMA) { 3936 mode |= 0x02 << (drive * 4); 3937 off = 0xa8 + chp->channel * 16 + 
drive * 2; 3938 val = dma_tbl[drvp->DMA_mode]; 3939 pciide_pci_write(pc, pa, off, val & 0xff); 3940 pciide_pci_write(pc, pa, off, val >> 8); 3941 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3942 } else { 3943 mode |= 0x01 << (drive * 4); 3944 off = 0xa4 + chp->channel * 16 + drive * 2; 3945 val = pio_tbl[drvp->PIO_mode]; 3946 pciide_pci_write(pc, pa, off, val & 0xff); 3947 pciide_pci_write(pc, pa, off, val >> 8); 3948 } 3949 } 3950 3951 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode); 3952 if (idedma_ctl != 0) { 3953 /* Add software bits in status register */ 3954 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3955 IDEDMA_CTL(chp->channel), 3956 idedma_ctl); 3957 } 3958 pciide_print_modes(cp); 3959 } 3960 3961 /* 3962 * When the Silicon Image 3112 retries a PCI memory read command, 3963 * it may retry it as a memory read multiple command under some 3964 * circumstances. This can totally confuse some PCI controllers, 3965 * so ensure that it will never do this by making sure that the 3966 * Read Threshold (FIFO Read Request Control) field of the FIFO 3967 * Valid Byte Count and Control registers for both channels (BA5 3968 * offset 0x40 and 0x44) are set to be at least as large as the 3969 * cacheline size register. 3970 */ 3971 void 3972 sii_fixup_cacheline(struct pciide_softc *sc, struct pci_attach_args *pa) 3973 { 3974 pcireg_t cls, reg40, reg44; 3975 3976 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 3977 cls = (cls >> PCI_CACHELINE_SHIFT) & PCI_CACHELINE_MASK; 3978 cls *= 4; 3979 if (cls > 224) { 3980 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 3981 cls &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT); 3982 cls |= ((224/4) << PCI_CACHELINE_SHIFT); 3983 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, cls); 3984 cls = 224; 3985 } 3986 if (cls < 32) 3987 cls = 32; 3988 cls = (cls + 31) / 32; 3989 reg40 = ba5_read_4(sc, 0x40); 3990 reg44 = ba5_read_4(sc, 0x44); 3991 if ((reg40 & 0x7) < cls) 3992 ba5_write_4(sc, 0x40, (reg40 & ~0x07) | cls); 3993 if ((reg44 & 0x7) < cls) 3994 ba5_write_4(sc, 0x44, (reg44 & ~0x07) | cls); 3995 } 3996 3997 void 3998 sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3999 { 4000 struct pciide_channel *cp; 4001 bus_size_t cmdsize, ctlsize; 4002 pcireg_t interface, scs_cmd, cfgctl; 4003 int channel; 4004 struct pciide_satalink *sl; 4005 4006 /* Allocate memory for private data */ 4007 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 4008 sl = sc->sc_cookie; 4009 4010 sc->chip_unmap = default_chip_unmap; 4011 4012 #define SII3112_RESET_BITS \ 4013 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4014 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4015 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET) 4016 4017 /* 4018 * Reset everything and then unblock all of the interrupts. 
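	 * (We set the reset bits, wait 50ms, then write the register back
	 * with only the BA5 enable bit preserved, which releases the
	 * resets and lets the channels raise interrupts again.)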
4019 */ 4020 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4021 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4022 scs_cmd | SII3112_RESET_BITS); 4023 delay(50 * 1000); 4024 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4025 scs_cmd & SCS_CMD_BA5_EN); 4026 delay(50 * 1000); 4027 4028 if (scs_cmd & SCS_CMD_BA5_EN) { 4029 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4030 PCI_MAPREG_TYPE_MEM | 4031 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4032 &sl->ba5_st, &sl->ba5_sh, 4033 NULL, NULL, 0) != 0) 4034 printf(": unable to map BA5 register space\n"); 4035 else 4036 sl->ba5_en = 1; 4037 } else { 4038 cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 4039 SII3112_PCI_CFGCTL); 4040 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL, 4041 cfgctl | CFGCTL_BA5INDEN); 4042 } 4043 4044 printf(": DMA"); 4045 pciide_mapreg_dma(sc, pa); 4046 printf("\n"); 4047 4048 /* 4049 * Rev. <= 0x01 of the 3112 have a bug that can cause data 4050 * corruption if DMA transfers cross an 8K boundary. This is 4051 * apparently hard to tickle, but we'll go ahead and play it 4052 * safe. 4053 */ 4054 if (sc->sc_rev <= 0x01) { 4055 sc->sc_dma_maxsegsz = 8192; 4056 sc->sc_dma_boundary = 8192; 4057 } 4058 4059 sii_fixup_cacheline(sc, pa); 4060 4061 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4062 sc->sc_wdcdev.PIO_cap = 4; 4063 if (sc->sc_dma_ok) { 4064 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4065 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4066 sc->sc_wdcdev.irqack = pciide_irqack; 4067 sc->sc_wdcdev.DMA_cap = 2; 4068 sc->sc_wdcdev.UDMA_cap = 6; 4069 } 4070 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4071 4072 /* We can use SControl and SStatus to probe for drives. */ 4073 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4074 4075 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4076 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4077 4078 /* 4079 * The 3112 either identifies itself as a RAID storage device 4080 * or a Misc storage device. Fake up the interface bits for 4081 * what our driver expects. 
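	 * (Claim bus-master DMA and native-PCI mode on both channels,
	 * matching the non-IDE branch below.)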
4082 */ 4083 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4084 interface = PCI_INTERFACE(pa->pa_class); 4085 } else { 4086 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4087 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4088 } 4089 4090 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4091 cp = &sc->pciide_channels[channel]; 4092 if (pciide_chansetup(sc, channel, interface) == 0) 4093 continue; 4094 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4095 pciide_pci_intr); 4096 if (cp->hw_ok == 0) 4097 continue; 4098 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4099 } 4100 } 4101 4102 void 4103 sii3112_setup_channel(struct channel_softc *chp) 4104 { 4105 struct ata_drive_datas *drvp; 4106 int drive; 4107 u_int32_t idedma_ctl, dtm; 4108 struct pciide_channel *cp = (struct pciide_channel *)chp; 4109 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4110 4111 /* setup DMA if needed */ 4112 pciide_channel_dma_setup(cp); 4113 4114 idedma_ctl = 0; 4115 dtm = 0; 4116 4117 for (drive = 0; drive < 2; drive++) { 4118 drvp = &chp->ch_drive[drive]; 4119 /* If no drive, skip */ 4120 if ((drvp->drive_flags & DRIVE) == 0) 4121 continue; 4122 if (drvp->drive_flags & DRIVE_UDMA) { 4123 /* use Ultra/DMA */ 4124 drvp->drive_flags &= ~DRIVE_DMA; 4125 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4126 dtm |= DTM_IDEx_DMA; 4127 } else if (drvp->drive_flags & DRIVE_DMA) { 4128 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4129 dtm |= DTM_IDEx_DMA; 4130 } else { 4131 dtm |= DTM_IDEx_PIO; 4132 } 4133 } 4134 4135 /* 4136 * Nothing to do to setup modes; it is meaningless in S-ATA 4137 * (but many S-ATA drives still want to get the SET_FEATURE 4138 * command). 4139 */ 4140 if (idedma_ctl != 0) { 4141 /* Add software bits in status register */ 4142 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 4143 } 4144 BA5_WRITE_4(sc, chp->channel, ba5_IDE_DTM, dtm); 4145 pciide_print_modes(cp); 4146 } 4147 4148 void 4149 sii3112_drv_probe(struct channel_softc *chp) 4150 { 4151 struct pciide_channel *cp = (struct pciide_channel *)chp; 4152 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4153 uint32_t scontrol, sstatus; 4154 uint8_t scnt, sn, cl, ch; 4155 int i, s; 4156 4157 /* XXX This should be done by other code. */ 4158 for (i = 0; i < 2; i++) { 4159 chp->ch_drive[i].chnl_softc = chp; 4160 chp->ch_drive[i].drive = i; 4161 } 4162 4163 /* 4164 * The 3112 is a 2-port part, and only has one drive per channel 4165 * (each port emulates a master drive). 4166 * 4167 * The 3114 is similar, but has 4 channels. 4168 */ 4169 4170 /* 4171 * Request communication initialization sequence, any speed. 4172 * Performing this is the equivalent of an ATA Reset. 4173 */ 4174 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 4175 4176 /* 4177 * XXX We don't yet support SATA power management; disable all 4178 * power management state transitions. 4179 */ 4180 scontrol |= SControl_IPM_NONE; 4181 4182 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4183 delay(50 * 1000); 4184 scontrol &= ~SControl_DET_INIT; 4185 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4186 delay(50 * 1000); 4187 4188 sstatus = BA5_READ_4(sc, chp->channel, ba5_SStatus); 4189 #if 0 4190 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 4191 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 4192 BA5_READ_4(sc, chp->channel, ba5_SControl)); 4193 #endif 4194 switch (sstatus & SStatus_DET_mask) { 4195 case SStatus_DET_NODEV: 4196 /* No device; be silent. 
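		 * (The SStatus DET field reports neither an attached
		 * device nor an established Phy link.)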
*/ 4197 break; 4198 4199 case SStatus_DET_DEV_NE: 4200 printf("%s: port %d: device connected, but " 4201 "communication not established\n", 4202 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4203 break; 4204 4205 case SStatus_DET_OFFLINE: 4206 printf("%s: port %d: PHY offline\n", 4207 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4208 break; 4209 4210 case SStatus_DET_DEV: 4211 /* 4212 * XXX ATAPI detection doesn't currently work. Don't 4213 * XXX know why. But, it's not like the standard method 4214 * XXX can detect an ATAPI device connected via a SATA/PATA 4215 * XXX bridge, so at least this is no worse. --thorpej 4216 */ 4217 if (chp->_vtbl != NULL) 4218 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 4219 else 4220 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 4221 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 4222 delay(10); /* 400ns delay */ 4223 /* Save register contents. */ 4224 if (chp->_vtbl != NULL) { 4225 scnt = CHP_READ_REG(chp, wdr_seccnt); 4226 sn = CHP_READ_REG(chp, wdr_sector); 4227 cl = CHP_READ_REG(chp, wdr_cyl_lo); 4228 ch = CHP_READ_REG(chp, wdr_cyl_hi); 4229 } else { 4230 scnt = bus_space_read_1(chp->cmd_iot, 4231 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 4232 sn = bus_space_read_1(chp->cmd_iot, 4233 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 4234 cl = bus_space_read_1(chp->cmd_iot, 4235 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 4236 ch = bus_space_read_1(chp->cmd_iot, 4237 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 4238 } 4239 #if 0 4240 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 4241 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 4242 scnt, sn, cl, ch); 4243 #endif 4244 /* 4245 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 4246 * cases we get wrong values here, so ignore it. 4247 */ 4248 s = splbio(); 4249 if (cl == 0x14 && ch == 0xeb) 4250 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 4251 else 4252 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 4253 splx(s); 4254 4255 printf("%s: port %d: device present", 4256 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4257 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 4258 case 1: 4259 printf(", speed: 1.5Gb/s"); 4260 break; 4261 case 2: 4262 printf(", speed: 3.0Gb/s"); 4263 break; 4264 } 4265 printf("\n"); 4266 break; 4267 4268 default: 4269 printf("%s: port %d: unknown SStatus: 0x%08x\n", 4270 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 4271 } 4272 } 4273 4274 void 4275 sii3114_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4276 { 4277 struct pciide_channel *cp; 4278 pcireg_t scs_cmd; 4279 pci_intr_handle_t intrhandle; 4280 const char *intrstr; 4281 int channel; 4282 struct pciide_satalink *sl; 4283 4284 /* Allocate memory for private data */ 4285 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 4286 sl = sc->sc_cookie; 4287 4288 #define SII3114_RESET_BITS \ 4289 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4290 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4291 SCS_CMD_FF3_RESET | SCS_CMD_FF2_RESET | \ 4292 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET | \ 4293 SCS_CMD_IDE3_RESET | SCS_CMD_IDE2_RESET) 4294 4295 /* 4296 * Reset everything and then unblock all of the interrupts. 4297 */ 4298 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4299 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4300 scs_cmd | SII3114_RESET_BITS); 4301 delay(50 * 1000); 4302 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4303 scs_cmd & SCS_CMD_M66EN); 4304 delay(50 * 1000); 4305 4306 /* 4307 * On the 3114, the BA5 register space is always enabled. 
In 4308 * order to use the 3114 in any sane way, we must use this BA5 4309 * register space, and so we consider it an error if we cannot 4310 * map it. 4311 * 4312 * As a consequence of using BA5, our register mapping is different 4313 * from a normal PCI IDE controller's, and so we are unable to use 4314 * most of the common PCI IDE register mapping functions. 4315 */ 4316 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4317 PCI_MAPREG_TYPE_MEM | 4318 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4319 &sl->ba5_st, &sl->ba5_sh, 4320 NULL, NULL, 0) != 0) { 4321 printf(": unable to map BA5 register space\n"); 4322 return; 4323 } 4324 sl->ba5_en = 1; 4325 4326 /* 4327 * Set the Interrupt Steering bit in the IDEDMA_CMD register of 4328 * channel 2. This is required at all times for proper operation 4329 * when using the BA5 register space (otherwise interrupts from 4330 * all 4 channels won't work). 4331 */ 4332 BA5_WRITE_4(sc, 2, ba5_IDEDMA_CMD, IDEDMA_CMD_INT_STEER); 4333 4334 printf(": DMA"); 4335 sii3114_mapreg_dma(sc, pa); 4336 printf("\n"); 4337 4338 sii_fixup_cacheline(sc, pa); 4339 4340 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4341 sc->sc_wdcdev.PIO_cap = 4; 4342 if (sc->sc_dma_ok) { 4343 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4344 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4345 sc->sc_wdcdev.irqack = pciide_irqack; 4346 sc->sc_wdcdev.DMA_cap = 2; 4347 sc->sc_wdcdev.UDMA_cap = 6; 4348 } 4349 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4350 4351 /* We can use SControl and SStatus to probe for drives. */ 4352 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4353 4354 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4355 sc->sc_wdcdev.nchannels = 4; 4356 4357 /* Map and establish the interrupt handler. */ 4358 if (pci_intr_map(pa, &intrhandle) != 0) { 4359 printf("%s: couldn't map native-PCI interrupt\n", 4360 sc->sc_wdcdev.sc_dev.dv_xname); 4361 return; 4362 } 4363 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 4364 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 4365 /* XXX */ 4366 pciide_pci_intr, sc, 4367 sc->sc_wdcdev.sc_dev.dv_xname); 4368 if (sc->sc_pci_ih != NULL) { 4369 printf("%s: using %s for native-PCI interrupt\n", 4370 sc->sc_wdcdev.sc_dev.dv_xname, 4371 intrstr ? intrstr : "unknown interrupt"); 4372 } else { 4373 printf("%s: couldn't establish native-PCI interrupt", 4374 sc->sc_wdcdev.sc_dev.dv_xname); 4375 if (intrstr != NULL) 4376 printf(" at %s", intrstr); 4377 printf("\n"); 4378 return; 4379 } 4380 4381 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4382 cp = &sc->pciide_channels[channel]; 4383 if (sii3114_chansetup(sc, channel) == 0) 4384 continue; 4385 sii3114_mapchan(cp); 4386 if (cp->hw_ok == 0) 4387 continue; 4388 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4389 } 4390 } 4391 4392 void 4393 sii3114_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 4394 { 4395 int chan, reg; 4396 bus_size_t size; 4397 struct pciide_satalink *sl = sc->sc_cookie; 4398 4399 sc->sc_wdcdev.dma_arg = sc; 4400 sc->sc_wdcdev.dma_init = pciide_dma_init; 4401 sc->sc_wdcdev.dma_start = pciide_dma_start; 4402 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 4403 4404 /* 4405 * Slice off a subregion of BA5 for each of the channel's DMA 4406 * registers. 
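	 * The bus-master registers for all four channels live at fixed
	 * offsets inside BA5 (see satalink_ba5_regmap), so each channel
	 * gets its own set of handles carved out of the single mapping.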
4407 */ 4408 4409 sc->sc_dma_iot = sl->ba5_st; 4410 for (chan = 0; chan < 4; chan++) { 4411 for (reg = 0; reg < IDEDMA_NREGS; reg++) { 4412 size = 4; 4413 if (size > (IDEDMA_SCH_OFFSET - reg)) 4414 size = IDEDMA_SCH_OFFSET - reg; 4415 if (bus_space_subregion(sl->ba5_st, 4416 sl->ba5_sh, 4417 satalink_ba5_regmap[chan].ba5_IDEDMA_CMD + reg, 4418 size, &sl->regs[chan].dma_iohs[reg]) != 0) { 4419 sc->sc_dma_ok = 0; 4420 printf(": can't subregion offset " 4421 "%lu size %lu", 4422 (u_long) satalink_ba5_regmap[ 4423 chan].ba5_IDEDMA_CMD + reg, 4424 (u_long) size); 4425 return; 4426 } 4427 } 4428 } 4429 4430 sc->sc_dmacmd_read = sii3114_dmacmd_read; 4431 sc->sc_dmacmd_write = sii3114_dmacmd_write; 4432 sc->sc_dmactl_read = sii3114_dmactl_read; 4433 sc->sc_dmactl_write = sii3114_dmactl_write; 4434 sc->sc_dmatbl_write = sii3114_dmatbl_write; 4435 4436 /* DMA registers all set up! */ 4437 sc->sc_dmat = pa->pa_dmat; 4438 sc->sc_dma_ok = 1; 4439 } 4440 4441 int 4442 sii3114_chansetup(struct pciide_softc *sc, int channel) 4443 { 4444 static const char *channel_names[] = { 4445 "port 0", 4446 "port 1", 4447 "port 2", 4448 "port 3", 4449 }; 4450 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4451 4452 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4453 4454 /* 4455 * We must always keep the Interrupt Steering bit set in channel 2's 4456 * IDEDMA_CMD register. 4457 */ 4458 if (channel == 2) 4459 cp->idedma_cmd = IDEDMA_CMD_INT_STEER; 4460 4461 cp->name = channel_names[channel]; 4462 cp->wdc_channel.channel = channel; 4463 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4464 cp->wdc_channel.ch_queue = 4465 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 4466 if (cp->wdc_channel.ch_queue == NULL) { 4467 printf("%s %s channel: " 4468 "can't allocate memory for command queue", 4469 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4470 return (0); 4471 } 4472 return (1); 4473 } 4474 4475 void 4476 sii3114_mapchan(struct pciide_channel *cp) 4477 { 4478 struct channel_softc *wdc_cp = &cp->wdc_channel; 4479 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4480 struct pciide_satalink *sl = sc->sc_cookie; 4481 int chan = wdc_cp->channel; 4482 int i; 4483 4484 cp->hw_ok = 0; 4485 cp->compat = 0; 4486 cp->ih = sc->sc_pci_ih; 4487 4488 sl->regs[chan].cmd_iot = sl->ba5_st; 4489 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4490 satalink_ba5_regmap[chan].ba5_IDE_TF0, 4491 9, &sl->regs[chan].cmd_baseioh) != 0) { 4492 printf("%s: couldn't subregion %s cmd base\n", 4493 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4494 return; 4495 } 4496 4497 sl->regs[chan].ctl_iot = sl->ba5_st; 4498 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4499 satalink_ba5_regmap[chan].ba5_IDE_TF8, 4500 1, &cp->ctl_baseioh) != 0) { 4501 printf("%s: couldn't subregion %s ctl base\n", 4502 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4503 return; 4504 } 4505 sl->regs[chan].ctl_ioh = cp->ctl_baseioh; 4506 4507 for (i = 0; i < WDC_NREG; i++) { 4508 if (bus_space_subregion(sl->regs[chan].cmd_iot, 4509 sl->regs[chan].cmd_baseioh, 4510 i, i == 0 ? 
4 : 1, 4511 &sl->regs[chan].cmd_iohs[i]) != 0) { 4512 printf("%s: couldn't subregion %s channel " 4513 "cmd regs\n", 4514 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4515 return; 4516 } 4517 } 4518 sl->regs[chan].cmd_iohs[wdr_status & _WDC_REGMASK] = 4519 sl->regs[chan].cmd_iohs[wdr_command & _WDC_REGMASK]; 4520 sl->regs[chan].cmd_iohs[wdr_features & _WDC_REGMASK] = 4521 sl->regs[chan].cmd_iohs[wdr_error & _WDC_REGMASK]; 4522 wdc_cp->data32iot = wdc_cp->cmd_iot = sl->regs[chan].cmd_iot; 4523 wdc_cp->data32ioh = wdc_cp->cmd_ioh = sl->regs[chan].cmd_iohs[0]; 4524 wdc_cp->_vtbl = &wdc_sii3114_vtbl; 4525 wdcattach(wdc_cp); 4526 cp->hw_ok = 1; 4527 } 4528 4529 u_int8_t 4530 sii3114_read_reg(struct channel_softc *chp, enum wdc_regs reg) 4531 { 4532 struct pciide_channel *cp = (struct pciide_channel *)chp; 4533 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4534 struct pciide_satalink *sl = sc->sc_cookie; 4535 4536 if (reg & _WDC_AUX) 4537 return (bus_space_read_1(sl->regs[chp->channel].ctl_iot, 4538 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 4539 else 4540 return (bus_space_read_1(sl->regs[chp->channel].cmd_iot, 4541 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0)); 4542 } 4543 4544 void 4545 sii3114_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 4546 { 4547 struct pciide_channel *cp = (struct pciide_channel *)chp; 4548 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4549 struct pciide_satalink *sl = sc->sc_cookie; 4550 4551 if (reg & _WDC_AUX) 4552 bus_space_write_1(sl->regs[chp->channel].ctl_iot, 4553 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 4554 else 4555 bus_space_write_1(sl->regs[chp->channel].cmd_iot, 4556 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 4557 0, val); 4558 } 4559 4560 u_int8_t 4561 sii3114_dmacmd_read(struct pciide_softc *sc, int chan) 4562 { 4563 struct pciide_satalink *sl = sc->sc_cookie; 4564 4565 return (bus_space_read_1(sc->sc_dma_iot, 4566 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0)); 4567 } 4568 4569 void 4570 sii3114_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 4571 { 4572 struct pciide_satalink *sl = sc->sc_cookie; 4573 4574 bus_space_write_1(sc->sc_dma_iot, 4575 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, val); 4576 } 4577 4578 u_int8_t 4579 sii3114_dmactl_read(struct pciide_softc *sc, int chan) 4580 { 4581 struct pciide_satalink *sl = sc->sc_cookie; 4582 4583 return (bus_space_read_1(sc->sc_dma_iot, 4584 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0)); 4585 } 4586 4587 void 4588 sii3114_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 4589 { 4590 struct pciide_satalink *sl = sc->sc_cookie; 4591 4592 bus_space_write_1(sc->sc_dma_iot, 4593 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0, val); 4594 } 4595 4596 void 4597 sii3114_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 4598 { 4599 struct pciide_satalink *sl = sc->sc_cookie; 4600 4601 bus_space_write_4(sc->sc_dma_iot, 4602 sl->regs[chan].dma_iohs[IDEDMA_TBL(0)], 0, val); 4603 } 4604 4605 void 4606 cy693_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4607 { 4608 struct pciide_channel *cp; 4609 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4610 bus_size_t cmdsize, ctlsize; 4611 struct pciide_cy *cy; 4612 4613 /* Allocate memory for private data */ 4614 sc->sc_cookie = malloc(sizeof(*cy), M_DEVBUF, M_NOWAIT | M_ZERO); 4615 cy = sc->sc_cookie; 4616 4617 /* 4618 * this chip has 2 PCI IDE functions, one for primary and one for 4619 * secondary. 
So we need to call pciide_mapregs_compat() with 4620 * the real channel 4621 */ 4622 if (pa->pa_function == 1) { 4623 cy->cy_compatchan = 0; 4624 } else if (pa->pa_function == 2) { 4625 cy->cy_compatchan = 1; 4626 } else { 4627 printf(": unexpected PCI function %d\n", pa->pa_function); 4628 return; 4629 } 4630 4631 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 4632 printf(": DMA"); 4633 pciide_mapreg_dma(sc, pa); 4634 } else { 4635 printf(": no DMA"); 4636 sc->sc_dma_ok = 0; 4637 } 4638 4639 cy->cy_handle = cy82c693_init(pa->pa_iot); 4640 if (cy->cy_handle == NULL) { 4641 printf(", (unable to map ctl registers)"); 4642 sc->sc_dma_ok = 0; 4643 } 4644 4645 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4646 WDC_CAPABILITY_MODE; 4647 if (sc->sc_dma_ok) { 4648 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4649 sc->sc_wdcdev.irqack = pciide_irqack; 4650 } 4651 sc->sc_wdcdev.PIO_cap = 4; 4652 sc->sc_wdcdev.DMA_cap = 2; 4653 sc->sc_wdcdev.set_modes = cy693_setup_channel; 4654 4655 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4656 sc->sc_wdcdev.nchannels = 1; 4657 4658 /* Only one channel for this chip; if we are here it's enabled */ 4659 cp = &sc->pciide_channels[0]; 4660 sc->wdc_chanarray[0] = &cp->wdc_channel; 4661 cp->name = PCIIDE_CHANNEL_NAME(0); 4662 cp->wdc_channel.channel = 0; 4663 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4664 cp->wdc_channel.ch_queue = 4665 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 4666 if (cp->wdc_channel.ch_queue == NULL) { 4667 printf(": cannot allocate memory for command queue\n"); 4668 return; 4669 } 4670 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 4671 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 4672 "configured" : "wired"); 4673 if (interface & PCIIDE_INTERFACE_PCI(0)) { 4674 printf("native-PCI\n"); 4675 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 4676 pciide_pci_intr); 4677 } else { 4678 printf("compatibility\n"); 4679 cp->hw_ok = pciide_mapregs_compat(pa, cp, cy->cy_compatchan, 4680 &cmdsize, &ctlsize); 4681 } 4682 4683 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4684 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4685 pciide_map_compat_intr(pa, cp, cy->cy_compatchan, interface); 4686 if (cp->hw_ok == 0) 4687 return; 4688 wdcattach(&cp->wdc_channel); 4689 if (pciide_chan_candisable(cp)) { 4690 pci_conf_write(sc->sc_pc, sc->sc_tag, 4691 PCI_COMMAND_STATUS_REG, 0); 4692 } 4693 if (cp->hw_ok == 0) { 4694 pciide_unmap_compat_intr(pa, cp, cy->cy_compatchan, 4695 interface); 4696 return; 4697 } 4698 4699 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 4700 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4701 cy693_setup_channel(&cp->wdc_channel); 4702 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 4703 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4704 } 4705 4706 void 4707 cy693_setup_channel(struct channel_softc *chp) 4708 { 4709 struct ata_drive_datas *drvp; 4710 int drive; 4711 u_int32_t cy_cmd_ctrl; 4712 u_int32_t idedma_ctl; 4713 struct pciide_channel *cp = (struct pciide_channel *)chp; 4714 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4715 int dma_mode = -1; 4716 struct pciide_cy *cy = sc->sc_cookie; 4717 4718 cy_cmd_ctrl = idedma_ctl = 0; 4719 4720 /* setup DMA if needed */ 4721 pciide_channel_dma_setup(cp); 4722 4723 for (drive = 0; drive < 2; drive++) { 4724 drvp = &chp->ch_drive[drive]; 4725 /* If no drive, skip */ 4726 if ((drvp->drive_flags & DRIVE) == 0) 4727 continue; 4728 /* add 
timing values, setup DMA if needed */
		if (drvp->drive_flags & DRIVE_DMA) {
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
			/* use Multiword DMA */
			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
				dma_mode = drvp->DMA_mode;
		}
		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOW_REC_OFF(drive));
		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOR_REC_OFF(drive));
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
	chp->ch_drive[0].DMA_mode = dma_mode;
	chp->ch_drive[1].DMA_mode = dma_mode;

	if (dma_mode == -1)
		dma_mode = 0;

	if (cy->cy_handle != NULL) {
		/* Note: `multiple' is implied. */
		cy82c693_write(cy->cy_handle,
		    (cy->cy_compatchan == 0) ?
		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
	}

	pciide_print_modes(cp);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(chp->channel), idedma_ctl);
	}
}

static struct sis_hostbr_type {
	u_int16_t id;
	u_int8_t rev;
	u_int8_t udma_mode;
	char *name;
	u_int8_t type;
#define SIS_TYPE_NOUDMA	0
#define SIS_TYPE_66	1
#define SIS_TYPE_100OLD	2
#define SIS_TYPE_100NEW	3
#define SIS_TYPE_133OLD	4
#define SIS_TYPE_133NEW	5
#define SIS_TYPE_SOUTH	6
} sis_hostbr_type[] = {
	/* Most of the info here is from sos@freebsd.org */
	{PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66},
#if 0
	/*
	 * controllers associated with a rev 0x2 530 Host to PCI Bridge
	 * have problems with UDMA (info provided by Christos)
	 */
	{PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
#endif
	{PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66},
	{PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66},
	{PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
	{PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
	{PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_661, 0x00, 6, "661", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
	{PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_741, 0x00, 6, "741", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
{PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH}, 4817 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH}, 4818 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH}, 4819 {PCI_PRODUCT_SIS_760, 0x00, 6, "760", SIS_TYPE_SOUTH}, 4820 /* 4821 * From sos@freebsd.org: the 0x961 ID will never be found in real world 4822 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW}, 4823 */ 4824 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW}, 4825 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW}, 4826 {PCI_PRODUCT_SIS_964, 0x00, 6, "964", SIS_TYPE_133NEW}, 4827 {PCI_PRODUCT_SIS_965, 0x00, 6, "965", SIS_TYPE_133NEW} 4828 }; 4829 4830 static struct sis_hostbr_type *sis_hostbr_type_match; 4831 4832 int 4833 sis_hostbr_match(struct pci_attach_args *pa) 4834 { 4835 int i; 4836 4837 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS) 4838 return (0); 4839 sis_hostbr_type_match = NULL; 4840 for (i = 0; 4841 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]); 4842 i++) { 4843 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id && 4844 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev) 4845 sis_hostbr_type_match = &sis_hostbr_type[i]; 4846 } 4847 return (sis_hostbr_type_match != NULL); 4848 } 4849 4850 int 4851 sis_south_match(struct pci_attach_args *pa) 4852 { 4853 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS && 4854 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 && 4855 PCI_REVISION(pa->pa_class) >= 0x10); 4856 } 4857 4858 void 4859 sis_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4860 { 4861 struct pciide_channel *cp; 4862 int channel; 4863 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 4864 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4865 int rev = sc->sc_rev; 4866 bus_size_t cmdsize, ctlsize; 4867 struct pciide_sis *sis; 4868 4869 /* Allocate memory for private data */ 4870 sc->sc_cookie = malloc(sizeof(*sis), M_DEVBUF, M_NOWAIT | M_ZERO); 4871 sis = sc->sc_cookie; 4872 4873 pci_find_device(NULL, sis_hostbr_match); 4874 4875 if (sis_hostbr_type_match) { 4876 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) { 4877 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57, 4878 pciide_pci_read(sc->sc_pc, sc->sc_tag, 4879 SIS_REG_57) & 0x7f); 4880 if (sc->sc_pp->ide_product == SIS_PRODUCT_5518) { 4881 sis->sis_type = SIS_TYPE_133NEW; 4882 sc->sc_wdcdev.UDMA_cap = 4883 sis_hostbr_type_match->udma_mode; 4884 } else { 4885 if (pci_find_device(NULL, sis_south_match)) { 4886 sis->sis_type = SIS_TYPE_133OLD; 4887 sc->sc_wdcdev.UDMA_cap = 4888 sis_hostbr_type_match->udma_mode; 4889 } else { 4890 sis->sis_type = SIS_TYPE_100NEW; 4891 sc->sc_wdcdev.UDMA_cap = 4892 sis_hostbr_type_match->udma_mode; 4893 } 4894 } 4895 } else { 4896 sis->sis_type = sis_hostbr_type_match->type; 4897 sc->sc_wdcdev.UDMA_cap = 4898 sis_hostbr_type_match->udma_mode; 4899 } 4900 printf(": %s", sis_hostbr_type_match->name); 4901 } else { 4902 printf(": 5597/5598"); 4903 if (rev >= 0xd0) { 4904 sc->sc_wdcdev.UDMA_cap = 2; 4905 sis->sis_type = SIS_TYPE_66; 4906 } else { 4907 sc->sc_wdcdev.UDMA_cap = 0; 4908 sis->sis_type = SIS_TYPE_NOUDMA; 4909 } 4910 } 4911 4912 printf(": DMA"); 4913 pciide_mapreg_dma(sc, pa); 4914 4915 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4916 WDC_CAPABILITY_MODE; 4917 if (sc->sc_dma_ok) { 4918 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4919 sc->sc_wdcdev.irqack = pciide_irqack; 4920 if (sis->sis_type >= SIS_TYPE_66) 4921 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 4922 } 4923 4924 sc->sc_wdcdev.PIO_cap = 4; 
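	/*
	 * All flavours are driven at PIO 0-4 and multiword DMA 0-2; the
	 * UDMA ceiling was chosen above from the host bridge table (or
	 * from the 5597/5598 default).
	 */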
4925 sc->sc_wdcdev.DMA_cap = 2; 4926 4927 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4928 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4929 switch (sis->sis_type) { 4930 case SIS_TYPE_NOUDMA: 4931 case SIS_TYPE_66: 4932 case SIS_TYPE_100OLD: 4933 sc->sc_wdcdev.set_modes = sis_setup_channel; 4934 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 4935 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 4936 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC); 4937 break; 4938 case SIS_TYPE_100NEW: 4939 case SIS_TYPE_133OLD: 4940 sc->sc_wdcdev.set_modes = sis_setup_channel; 4941 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49, 4942 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01); 4943 break; 4944 case SIS_TYPE_133NEW: 4945 sc->sc_wdcdev.set_modes = sis96x_setup_channel; 4946 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 4947 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 4948 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 4949 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 4950 break; 4951 } 4952 4953 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 4954 4955 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4956 cp = &sc->pciide_channels[channel]; 4957 if (pciide_chansetup(sc, channel, interface) == 0) 4958 continue; 4959 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 4960 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 4961 printf("%s: %s ignored (disabled)\n", 4962 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4963 continue; 4964 } 4965 pciide_map_compat_intr(pa, cp, channel, interface); 4966 if (cp->hw_ok == 0) 4967 continue; 4968 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4969 pciide_pci_intr); 4970 if (cp->hw_ok == 0) { 4971 pciide_unmap_compat_intr(pa, cp, channel, interface); 4972 continue; 4973 } 4974 if (pciide_chan_candisable(cp)) { 4975 if (channel == 0) 4976 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 4977 else 4978 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 4979 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 4980 sis_ctr0); 4981 } 4982 if (cp->hw_ok == 0) { 4983 pciide_unmap_compat_intr(pa, cp, channel, interface); 4984 continue; 4985 } 4986 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4987 } 4988 } 4989 4990 void 4991 sis96x_setup_channel(struct channel_softc *chp) 4992 { 4993 struct ata_drive_datas *drvp; 4994 int drive; 4995 u_int32_t sis_tim; 4996 u_int32_t idedma_ctl; 4997 int regtim; 4998 struct pciide_channel *cp = (struct pciide_channel *)chp; 4999 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5000 5001 sis_tim = 0; 5002 idedma_ctl = 0; 5003 /* setup DMA if needed */ 5004 pciide_channel_dma_setup(cp); 5005 5006 for (drive = 0; drive < 2; drive++) { 5007 regtim = SIS_TIM133( 5008 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 5009 chp->channel, drive); 5010 drvp = &chp->ch_drive[drive]; 5011 /* If no drive, skip */ 5012 if ((drvp->drive_flags & DRIVE) == 0) 5013 continue; 5014 /* add timing values, setup DMA if needed */ 5015 if (drvp->drive_flags & DRIVE_UDMA) { 5016 /* use Ultra/DMA */ 5017 drvp->drive_flags &= ~DRIVE_DMA; 5018 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5019 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 5020 if (drvp->UDMA_mode > 2) 5021 drvp->UDMA_mode = 2; 5022 } 5023 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 5024 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5025 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5026 } else if (drvp->drive_flags & DRIVE_DMA) { 5027 /* 5028 * use Multiword DMA 5029 * Timings will be used for 
both PIO and DMA, 5030 * so adjust DMA mode if needed 5031 */ 5032 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5033 drvp->PIO_mode = drvp->DMA_mode + 2; 5034 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5035 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 5036 drvp->PIO_mode - 2 : 0; 5037 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 5038 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5039 } else { 5040 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5041 } 5042 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 5043 "channel %d drive %d: 0x%x (reg 0x%x)\n", 5044 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 5045 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 5046 } 5047 if (idedma_ctl != 0) { 5048 /* Add software bits in status register */ 5049 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5050 IDEDMA_CTL(chp->channel), idedma_ctl); 5051 } 5052 pciide_print_modes(cp); 5053 } 5054 5055 void 5056 sis_setup_channel(struct channel_softc *chp) 5057 { 5058 struct ata_drive_datas *drvp; 5059 int drive; 5060 u_int32_t sis_tim; 5061 u_int32_t idedma_ctl; 5062 struct pciide_channel *cp = (struct pciide_channel *)chp; 5063 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5064 struct pciide_sis *sis = sc->sc_cookie; 5065 5066 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 5067 "channel %d 0x%x\n", chp->channel, 5068 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 5069 DEBUG_PROBE); 5070 sis_tim = 0; 5071 idedma_ctl = 0; 5072 /* setup DMA if needed */ 5073 pciide_channel_dma_setup(cp); 5074 5075 for (drive = 0; drive < 2; drive++) { 5076 drvp = &chp->ch_drive[drive]; 5077 /* If no drive, skip */ 5078 if ((drvp->drive_flags & DRIVE) == 0) 5079 continue; 5080 /* add timing values, setup DMA if needed */ 5081 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5082 (drvp->drive_flags & DRIVE_UDMA) == 0) 5083 goto pio; 5084 5085 if (drvp->drive_flags & DRIVE_UDMA) { 5086 /* use Ultra/DMA */ 5087 drvp->drive_flags &= ~DRIVE_DMA; 5088 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5089 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 5090 if (drvp->UDMA_mode > 2) 5091 drvp->UDMA_mode = 2; 5092 } 5093 switch (sis->sis_type) { 5094 case SIS_TYPE_66: 5095 case SIS_TYPE_100OLD: 5096 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 5097 SIS_TIM66_UDMA_TIME_OFF(drive); 5098 break; 5099 case SIS_TYPE_100NEW: 5100 sis_tim |= 5101 sis_udma100new_tim[drvp->UDMA_mode] << 5102 SIS_TIM100_UDMA_TIME_OFF(drive); 5103 break; 5104 case SIS_TYPE_133OLD: 5105 sis_tim |= 5106 sis_udma133old_tim[drvp->UDMA_mode] << 5107 SIS_TIM100_UDMA_TIME_OFF(drive); 5108 break; 5109 default: 5110 printf("unknown SiS IDE type %d\n", 5111 sis->sis_type); 5112 } 5113 } else { 5114 /* 5115 * use Multiword DMA 5116 * Timings will be used for both PIO and DMA, 5117 * so adjust DMA mode if needed 5118 */ 5119 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5120 drvp->PIO_mode = drvp->DMA_mode + 2; 5121 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5122 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5123 drvp->PIO_mode - 2 : 0; 5124 if (drvp->DMA_mode == 0) 5125 drvp->PIO_mode = 0; 5126 } 5127 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5128 pio: switch (sis->sis_type) { 5129 case SIS_TYPE_NOUDMA: 5130 case SIS_TYPE_66: 5131 case SIS_TYPE_100OLD: 5132 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5133 SIS_TIM66_ACT_OFF(drive); 5134 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5135 SIS_TIM66_REC_OFF(drive); 5136 break; 5137 case SIS_TYPE_100NEW: 5138 case SIS_TYPE_133OLD: 5139 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5140 SIS_TIM100_ACT_OFF(drive); 5141 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5142 SIS_TIM100_REC_OFF(drive); 5143 break; 5144 default: 5145 printf("unknown SiS IDE type %d\n", 5146 sis->sis_type); 5147 } 5148 } 5149 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 5150 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 5151 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 5152 if (idedma_ctl != 0) { 5153 /* Add software bits in status register */ 5154 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5155 IDEDMA_CTL(chp->channel), idedma_ctl); 5156 } 5157 pciide_print_modes(cp); 5158 } 5159 5160 void 5161 natsemi_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5162 { 5163 struct pciide_channel *cp; 5164 int channel; 5165 pcireg_t interface, ctl; 5166 bus_size_t cmdsize, ctlsize; 5167 5168 printf(": DMA"); 5169 pciide_mapreg_dma(sc, pa); 5170 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 5171 5172 if (sc->sc_dma_ok) { 5173 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5174 sc->sc_wdcdev.irqack = natsemi_irqack; 5175 } 5176 5177 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 5178 5179 /* 5180 * Mask off interrupts from both channels, appropriate channel(s) 5181 * will be unmasked later. 5182 */ 5183 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5184 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 5185 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 5186 5187 sc->sc_wdcdev.PIO_cap = 4; 5188 sc->sc_wdcdev.DMA_cap = 2; 5189 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 5190 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5191 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5192 5193 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5194 PCI_CLASS_REG)); 5195 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 5196 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5197 5198 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
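	 * (Presumably the compat channels interrupt through the legacy
	 * ISA IRQs, so leaving INTA unmasked would only produce spurious
	 * interrupts.)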
*/ 5199 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 5200 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 5201 ctl &= ~NATSEMI_CTRL1_INTAMASK; 5202 else 5203 ctl |= NATSEMI_CTRL1_INTAMASK; 5204 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 5205 5206 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5207 cp = &sc->pciide_channels[channel]; 5208 if (pciide_chansetup(sc, channel, interface) == 0) 5209 continue; 5210 5211 pciide_map_compat_intr(pa, cp, channel, interface); 5212 if (cp->hw_ok == 0) 5213 continue; 5214 5215 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5216 natsemi_pci_intr); 5217 if (cp->hw_ok == 0) { 5218 pciide_unmap_compat_intr(pa, cp, channel, interface); 5219 continue; 5220 } 5221 natsemi_setup_channel(&cp->wdc_channel); 5222 } 5223 } 5224 5225 void 5226 natsemi_setup_channel(struct channel_softc *chp) 5227 { 5228 struct ata_drive_datas *drvp; 5229 int drive, ndrives = 0; 5230 u_int32_t idedma_ctl = 0; 5231 struct pciide_channel *cp = (struct pciide_channel *)chp; 5232 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5233 u_int8_t tim; 5234 5235 /* setup DMA if needed */ 5236 pciide_channel_dma_setup(cp); 5237 5238 for (drive = 0; drive < 2; drive++) { 5239 drvp = &chp->ch_drive[drive]; 5240 /* If no drive, skip */ 5241 if ((drvp->drive_flags & DRIVE) == 0) 5242 continue; 5243 5244 ndrives++; 5245 /* add timing values, setup DMA if needed */ 5246 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 5247 tim = natsemi_pio_pulse[drvp->PIO_mode] | 5248 (natsemi_pio_recover[drvp->PIO_mode] << 4); 5249 } else { 5250 /* 5251 * use Multiword DMA 5252 * Timings will be used for both PIO and DMA, 5253 * so adjust DMA mode if needed 5254 */ 5255 if (drvp->PIO_mode >= 3 && 5256 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5257 drvp->DMA_mode = drvp->PIO_mode - 2; 5258 } 5259 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5260 tim = natsemi_dma_pulse[drvp->DMA_mode] | 5261 (natsemi_dma_recover[drvp->DMA_mode] << 4); 5262 } 5263 5264 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5265 NATSEMI_RTREG(chp->channel, drive), tim); 5266 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5267 NATSEMI_WTREG(chp->channel, drive), tim); 5268 } 5269 if (idedma_ctl != 0) { 5270 /* Add software bits in status register */ 5271 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5272 IDEDMA_CTL(chp->channel), idedma_ctl); 5273 } 5274 if (ndrives > 0) { 5275 /* Unmask the channel if at least one drive is found */ 5276 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5277 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 5278 ~(NATSEMI_CHMASK(chp->channel))); 5279 } 5280 5281 pciide_print_modes(cp); 5282 5283 /* Go ahead and ack interrupts generated during probe. 
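	 * Reading IDEDMA_CTL and writing the value straight back clears
	 * the write-one-to-clear interrupt and error bits.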
	 */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chp->channel),
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chp->channel)));
}

void
natsemi_irqack(struct channel_softc *chp)
{
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	u_int8_t clr;

	/* The "clear" bits are in the wrong register *sigh* */
	clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chp->channel));
	clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chp->channel)) &
	    (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chp->channel), clr);
}

int
natsemi_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;
	u_int8_t msk;

	rv = 0;
	msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2);
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel, skip it. */
		if (cp->compat)
			continue;

		/* If this channel is masked, skip it. */
		if (msk & NATSEMI_CHMASK(i))
			continue;

		if (pciide_intr_flag(cp) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;	/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

void
ns_scx200_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	bus_size_t cmdsize, ctlsize;

	printf(": DMA");
	pciide_mapreg_dma(sc, pa);

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 2;

	sc->sc_wdcdev.set_modes = ns_scx200_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/*
	 * Soekris net4801 errata 0003:
	 *
	 * The SC1100's built-in busmaster IDE controller is pretty standard,
	 * but it has two bugs: data transfers need to be dword aligned and
	 * it cannot do an exact 64Kbyte data transfer.
	 *
	 * Assume that reducing the maximum segment size by one page
	 * will be enough, and restrict the boundary too for extra certainty.
	 */
	if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SCx200_IDE) {
		sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE;
		sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE;
	}

	/*
	 * This chip seems to be unable to do one-sector transfers
	 * using DMA.
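	 * (The WDC_QUIRK_NOSHORTDMA flag set below makes the wdc layer
	 * avoid DMA for such short transfers.)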
5388 */ 5389 sc->sc_wdcdev.quirks = WDC_QUIRK_NOSHORTDMA; 5390 5391 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5392 5393 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5394 cp = &sc->pciide_channels[channel]; 5395 if (pciide_chansetup(sc, channel, interface) == 0) 5396 continue; 5397 pciide_map_compat_intr(pa, cp, channel, interface); 5398 if (cp->hw_ok == 0) 5399 continue; 5400 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5401 pciide_pci_intr); 5402 if (cp->hw_ok == 0) { 5403 pciide_unmap_compat_intr(pa, cp, channel, interface); 5404 continue; 5405 } 5406 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5407 } 5408 } 5409 5410 void 5411 ns_scx200_setup_channel(struct channel_softc *chp) 5412 { 5413 struct ata_drive_datas *drvp; 5414 int drive, mode; 5415 u_int32_t idedma_ctl; 5416 struct pciide_channel *cp = (struct pciide_channel*)chp; 5417 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5418 int channel = chp->channel; 5419 int pioformat; 5420 pcireg_t piotim, dmatim; 5421 5422 /* Setup DMA if needed */ 5423 pciide_channel_dma_setup(cp); 5424 5425 idedma_ctl = 0; 5426 5427 pioformat = (pci_conf_read(sc->sc_pc, sc->sc_tag, 5428 SCx200_TIM_DMA(0, 0)) >> SCx200_PIOFORMAT_SHIFT) & 0x01; 5429 WDCDEBUG_PRINT(("%s: pio format %d\n", __func__, pioformat), 5430 DEBUG_PROBE); 5431 5432 /* Per channel settings */ 5433 for (drive = 0; drive < 2; drive++) { 5434 drvp = &chp->ch_drive[drive]; 5435 5436 /* If no drive, skip */ 5437 if ((drvp->drive_flags & DRIVE) == 0) 5438 continue; 5439 5440 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5441 SCx200_TIM_PIO(channel, drive)); 5442 dmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5443 SCx200_TIM_DMA(channel, drive)); 5444 WDCDEBUG_PRINT(("%s:%d:%d: piotim=0x%x, dmatim=0x%x\n", 5445 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5446 piotim, dmatim), DEBUG_PROBE); 5447 5448 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 5449 (drvp->drive_flags & DRIVE_UDMA) != 0) { 5450 /* Setup UltraDMA mode */ 5451 drvp->drive_flags &= ~DRIVE_DMA; 5452 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5453 dmatim = scx200_udma33[drvp->UDMA_mode]; 5454 mode = drvp->PIO_mode; 5455 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 5456 (drvp->drive_flags & DRIVE_DMA) != 0) { 5457 /* Setup multiword DMA mode */ 5458 drvp->drive_flags &= ~DRIVE_UDMA; 5459 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5460 dmatim = scx200_dma33[drvp->DMA_mode]; 5461 5462 /* mode = min(pio, dma + 2) */ 5463 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 5464 mode = drvp->PIO_mode; 5465 else 5466 mode = drvp->DMA_mode + 2; 5467 } else { 5468 mode = drvp->PIO_mode; 5469 } 5470 5471 /* Setup PIO mode */ 5472 drvp->PIO_mode = mode; 5473 if (mode < 2) 5474 drvp->DMA_mode = 0; 5475 else 5476 drvp->DMA_mode = mode - 2; 5477 5478 piotim = scx200_pio33[pioformat][drvp->PIO_mode]; 5479 5480 WDCDEBUG_PRINT(("%s:%d:%d: new piotim=0x%x, dmatim=0x%x\n", 5481 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5482 piotim, dmatim), DEBUG_PROBE); 5483 5484 pci_conf_write(sc->sc_pc, sc->sc_tag, 5485 SCx200_TIM_PIO(channel, drive), piotim); 5486 pci_conf_write(sc->sc_pc, sc->sc_tag, 5487 SCx200_TIM_DMA(channel, drive), dmatim); 5488 } 5489 5490 if (idedma_ctl != 0) { 5491 /* Add software bits in status register */ 5492 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5493 IDEDMA_CTL(channel), idedma_ctl); 5494 } 5495 5496 pciide_print_modes(cp); 5497 } 5498 5499 void 5500 acer_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5501 { 5502 struct 
pciide_channel *cp; 5503 int channel; 5504 pcireg_t cr, interface; 5505 bus_size_t cmdsize, ctlsize; 5506 int rev = sc->sc_rev; 5507 5508 printf(": DMA"); 5509 pciide_mapreg_dma(sc, pa); 5510 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5511 WDC_CAPABILITY_MODE; 5512 5513 if (sc->sc_dma_ok) { 5514 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 5515 if (rev >= 0x20) { 5516 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5517 if (rev >= 0xC4) 5518 sc->sc_wdcdev.UDMA_cap = 5; 5519 else if (rev >= 0xC2) 5520 sc->sc_wdcdev.UDMA_cap = 4; 5521 else 5522 sc->sc_wdcdev.UDMA_cap = 2; 5523 } 5524 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5525 sc->sc_wdcdev.irqack = pciide_irqack; 5526 } 5527 5528 sc->sc_wdcdev.PIO_cap = 4; 5529 sc->sc_wdcdev.DMA_cap = 2; 5530 sc->sc_wdcdev.set_modes = acer_setup_channel; 5531 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5532 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5533 5534 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 5535 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 5536 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 5537 5538 /* Enable "microsoft register bits" R/W. */ 5539 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 5540 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 5541 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 5542 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 5543 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 5544 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 5545 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 5546 ~ACER_CHANSTATUSREGS_RO); 5547 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 5548 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 5549 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 5550 /* Don't use cr, re-read the real register content instead */ 5551 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5552 PCI_CLASS_REG)); 5553 5554 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5555 5556 /* From linux: enable "Cable Detection" */ 5557 if (rev >= 0xC2) 5558 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 5559 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 5560 | ACER_0x4B_CDETECT); 5561 5562 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5563 cp = &sc->pciide_channels[channel]; 5564 if (pciide_chansetup(sc, channel, interface) == 0) 5565 continue; 5566 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 5567 printf("%s: %s ignored (disabled)\n", 5568 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5569 continue; 5570 } 5571 pciide_map_compat_intr(pa, cp, channel, interface); 5572 if (cp->hw_ok == 0) 5573 continue; 5574 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5575 (rev >= 0xC2) ? 
pciide_pci_intr : acer_pci_intr); 5576 if (cp->hw_ok == 0) { 5577 pciide_unmap_compat_intr(pa, cp, channel, interface); 5578 continue; 5579 } 5580 if (pciide_chan_candisable(cp)) { 5581 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 5582 pci_conf_write(sc->sc_pc, sc->sc_tag, 5583 PCI_CLASS_REG, cr); 5584 } 5585 if (cp->hw_ok == 0) { 5586 pciide_unmap_compat_intr(pa, cp, channel, interface); 5587 continue; 5588 } 5589 acer_setup_channel(&cp->wdc_channel); 5590 } 5591 } 5592 5593 void 5594 acer_setup_channel(struct channel_softc *chp) 5595 { 5596 struct ata_drive_datas *drvp; 5597 int drive; 5598 u_int32_t acer_fifo_udma; 5599 u_int32_t idedma_ctl; 5600 struct pciide_channel *cp = (struct pciide_channel *)chp; 5601 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5602 5603 idedma_ctl = 0; 5604 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 5605 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 5606 acer_fifo_udma), DEBUG_PROBE); 5607 /* setup DMA if needed */ 5608 pciide_channel_dma_setup(cp); 5609 5610 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 5611 DRIVE_UDMA) { /* check 80 pins cable */ 5612 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 5613 ACER_0x4A_80PIN(chp->channel)) { 5614 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 5615 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 5616 DEBUG_PROBE); 5617 if (chp->ch_drive[0].UDMA_mode > 2) 5618 chp->ch_drive[0].UDMA_mode = 2; 5619 if (chp->ch_drive[1].UDMA_mode > 2) 5620 chp->ch_drive[1].UDMA_mode = 2; 5621 } 5622 } 5623 5624 for (drive = 0; drive < 2; drive++) { 5625 drvp = &chp->ch_drive[drive]; 5626 /* If no drive, skip */ 5627 if ((drvp->drive_flags & DRIVE) == 0) 5628 continue; 5629 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 5630 "channel %d drive %d 0x%x\n", chp->channel, drive, 5631 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5632 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 5633 /* clear FIFO/DMA mode */ 5634 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 5635 ACER_UDMA_EN(chp->channel, drive) | 5636 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 5637 5638 /* add timing values, setup DMA if needed */ 5639 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5640 (drvp->drive_flags & DRIVE_UDMA) == 0) { 5641 acer_fifo_udma |= 5642 ACER_FTH_OPL(chp->channel, drive, 0x1); 5643 goto pio; 5644 } 5645 5646 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 5647 if (drvp->drive_flags & DRIVE_UDMA) { 5648 /* use Ultra/DMA */ 5649 drvp->drive_flags &= ~DRIVE_DMA; 5650 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 5651 acer_fifo_udma |= 5652 ACER_UDMA_TIM(chp->channel, drive, 5653 acer_udma[drvp->UDMA_mode]); 5654 /* XXX disable if one drive < UDMA3 ? */ 5655 if (drvp->UDMA_mode >= 3) { 5656 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5657 ACER_0x4B, 5658 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5659 ACER_0x4B) | ACER_0x4B_UDMA66); 5660 } 5661 } else { 5662 /* 5663 * use Multiword DMA 5664 * Timings will be used for both PIO and DMA, 5665 * so adjust DMA mode if needed 5666 */ 5667 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5668 drvp->PIO_mode = drvp->DMA_mode + 2; 5669 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5670 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5671 drvp->PIO_mode - 2 : 0; 5672 if (drvp->DMA_mode == 0) 5673 drvp->PIO_mode = 0; 5674 } 5675 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5676 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 5677 ACER_IDETIM(chp->channel, drive), 5678 acer_pio[drvp->PIO_mode]); 5679 } 5680 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 5681 acer_fifo_udma), DEBUG_PROBE); 5682 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 5683 if (idedma_ctl != 0) { 5684 /* Add software bits in status register */ 5685 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5686 IDEDMA_CTL(chp->channel), idedma_ctl); 5687 } 5688 pciide_print_modes(cp); 5689 } 5690 5691 int 5692 acer_pci_intr(void *arg) 5693 { 5694 struct pciide_softc *sc = arg; 5695 struct pciide_channel *cp; 5696 struct channel_softc *wdc_cp; 5697 int i, rv, crv; 5698 u_int32_t chids; 5699 5700 rv = 0; 5701 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 5702 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5703 cp = &sc->pciide_channels[i]; 5704 wdc_cp = &cp->wdc_channel; 5705 /* If a compat channel skip. */ 5706 if (cp->compat) 5707 continue; 5708 if (chids & ACER_CHIDS_INT(i)) { 5709 crv = wdcintr(wdc_cp); 5710 if (crv == 0) 5711 printf("%s:%d: bogus intr\n", 5712 sc->sc_wdcdev.sc_dev.dv_xname, i); 5713 else 5714 rv = 1; 5715 } 5716 } 5717 return (rv); 5718 } 5719 5720 void 5721 hpt_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5722 { 5723 struct pciide_channel *cp; 5724 int i, compatchan, revision; 5725 pcireg_t interface; 5726 bus_size_t cmdsize, ctlsize; 5727 5728 revision = sc->sc_rev; 5729 5730 /* 5731 * when the chip is in native mode it identifies itself as a 5732 * 'misc mass storage'. Fake interface in this case. 5733 */ 5734 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 5735 interface = PCI_INTERFACE(pa->pa_class); 5736 } else { 5737 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 5738 PCIIDE_INTERFACE_PCI(0); 5739 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5740 (revision == HPT370_REV || revision == HPT370A_REV || 5741 revision == HPT372_REV)) || 5742 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5743 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5744 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5745 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5746 interface |= PCIIDE_INTERFACE_PCI(1); 5747 } 5748 5749 printf(": DMA"); 5750 pciide_mapreg_dma(sc, pa); 5751 printf("\n"); 5752 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5753 WDC_CAPABILITY_MODE; 5754 if (sc->sc_dma_ok) { 5755 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5756 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5757 sc->sc_wdcdev.irqack = pciide_irqack; 5758 } 5759 sc->sc_wdcdev.PIO_cap = 4; 5760 sc->sc_wdcdev.DMA_cap = 2; 5761 5762 sc->sc_wdcdev.set_modes = hpt_setup_channel; 5763 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5764 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5765 revision == HPT366_REV) { 5766 sc->sc_wdcdev.UDMA_cap = 4; 5767 /* 5768 * The 366 has 2 PCI IDE functions, one for primary and one 5769 * for secondary. 
So we need to call pciide_mapregs_compat() 5770 * with the real channel 5771 */ 5772 if (pa->pa_function == 0) { 5773 compatchan = 0; 5774 } else if (pa->pa_function == 1) { 5775 compatchan = 1; 5776 } else { 5777 printf("%s: unexpected PCI function %d\n", 5778 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 5779 return; 5780 } 5781 sc->sc_wdcdev.nchannels = 1; 5782 } else { 5783 sc->sc_wdcdev.nchannels = 2; 5784 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5785 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5786 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5787 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5788 sc->sc_wdcdev.UDMA_cap = 6; 5789 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 5790 if (revision == HPT372_REV) 5791 sc->sc_wdcdev.UDMA_cap = 6; 5792 else 5793 sc->sc_wdcdev.UDMA_cap = 5; 5794 } 5795 } 5796 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5797 cp = &sc->pciide_channels[i]; 5798 if (sc->sc_wdcdev.nchannels > 1) { 5799 compatchan = i; 5800 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 5801 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 5802 printf("%s: %s ignored (disabled)\n", 5803 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5804 continue; 5805 } 5806 } 5807 if (pciide_chansetup(sc, i, interface) == 0) 5808 continue; 5809 if (interface & PCIIDE_INTERFACE_PCI(i)) { 5810 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 5811 &ctlsize, hpt_pci_intr); 5812 } else { 5813 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 5814 &cmdsize, &ctlsize); 5815 } 5816 if (cp->hw_ok == 0) 5817 return; 5818 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 5819 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 5820 wdcattach(&cp->wdc_channel); 5821 hpt_setup_channel(&cp->wdc_channel); 5822 } 5823 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5824 (revision == HPT370_REV || revision == HPT370A_REV || 5825 revision == HPT372_REV)) || 5826 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5827 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5828 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5829 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 5830 /* 5831 * Turn off fast interrupts 5832 */ 5833 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0), 5834 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) & 5835 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 5836 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1), 5837 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) & 5838 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 5839 5840 /* 5841 * HPT370 and higher have a bit to disable interrupts; 5842 * make sure to clear it 5843 */ 5844 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 5845 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 5846 ~HPT_CSEL_IRQDIS); 5847 } 5848 /* set clocks, etc (mandatory on 372/4, optional otherwise) */ 5849 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5850 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5851 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5852 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 || 5853 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5854 revision == HPT372_REV)) 5855 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 5856 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 5857 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 5858 5859 return; 5860 } 5861 5862 void 5863 hpt_setup_channel(struct channel_softc *chp) 5864 { 5865 struct ata_drive_datas *drvp; 5866 int drive;
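/* cable: cached copy of HPT_CSEL, used below for 80-wire cable detection */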
5867 int cable; 5868 u_int32_t before, after; 5869 u_int32_t idedma_ctl; 5870 struct pciide_channel *cp = (struct pciide_channel *)chp; 5871 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5872 int revision = sc->sc_rev; 5873 u_int32_t *tim_pio, *tim_dma, *tim_udma; 5874 5875 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 5876 5877 /* setup DMA if needed */ 5878 pciide_channel_dma_setup(cp); 5879 5880 idedma_ctl = 0; 5881 5882 switch (sc->sc_pp->ide_product) { 5883 case PCI_PRODUCT_TRIONES_HPT366: 5884 if (revision == HPT370_REV || 5885 revision == HPT370A_REV) { 5886 tim_pio = hpt370_pio; 5887 tim_dma = hpt370_dma; 5888 tim_udma = hpt370_udma; 5889 } else if (revision == HPT372_REV) { 5890 tim_pio = hpt372_pio; 5891 tim_dma = hpt372_dma; 5892 tim_udma = hpt372_udma; 5893 } else { 5894 tim_pio = hpt366_pio; 5895 tim_dma = hpt366_dma; 5896 tim_udma = hpt366_udma; 5897 } 5898 break; 5899 case PCI_PRODUCT_TRIONES_HPT372A: 5900 case PCI_PRODUCT_TRIONES_HPT302: 5901 case PCI_PRODUCT_TRIONES_HPT371: 5902 tim_pio = hpt372_pio; 5903 tim_dma = hpt372_dma; 5904 tim_udma = hpt372_udma; 5905 break; 5906 case PCI_PRODUCT_TRIONES_HPT374: 5907 tim_pio = hpt374_pio; 5908 tim_dma = hpt374_dma; 5909 tim_udma = hpt374_udma; 5910 break; 5911 default: 5912 printf("%s: no known timing values\n", 5913 sc->sc_wdcdev.sc_dev.dv_xname); 5914 goto end; 5915 } 5916 5917 /* Per drive settings */ 5918 for (drive = 0; drive < 2; drive++) { 5919 drvp = &chp->ch_drive[drive]; 5920 /* If no drive, skip */ 5921 if ((drvp->drive_flags & DRIVE) == 0) 5922 continue; 5923 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 5924 HPT_IDETIM(chp->channel, drive)); 5925 5926 /* add timing values, setup DMA if needed */ 5927 if (drvp->drive_flags & DRIVE_UDMA) { 5928 /* use Ultra/DMA */ 5929 drvp->drive_flags &= ~DRIVE_DMA; 5930 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 5931 drvp->UDMA_mode > 2) { 5932 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 5933 "cable not detected\n", drvp->drive_name, 5934 sc->sc_wdcdev.sc_dev.dv_xname, 5935 chp->channel, drive), DEBUG_PROBE); 5936 drvp->UDMA_mode = 2; 5937 } 5938 after = tim_udma[drvp->UDMA_mode]; 5939 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5940 } else if (drvp->drive_flags & DRIVE_DMA) { 5941 /* 5942 * use Multiword DMA. 
5943 * Timings will be used for both PIO and DMA, so adjust 5944 * DMA mode if needed 5945 */ 5946 if (drvp->PIO_mode >= 3 && 5947 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5948 drvp->DMA_mode = drvp->PIO_mode - 2; 5949 } 5950 after = tim_dma[drvp->DMA_mode]; 5951 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5952 } else { 5953 /* PIO only */ 5954 after = tim_pio[drvp->PIO_mode]; 5955 } 5956 pci_conf_write(sc->sc_pc, sc->sc_tag, 5957 HPT_IDETIM(chp->channel, drive), after); 5958 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 5959 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 5960 after, before), DEBUG_PROBE); 5961 } 5962 end: 5963 if (idedma_ctl != 0) { 5964 /* Add software bits in status register */ 5965 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5966 IDEDMA_CTL(chp->channel), idedma_ctl); 5967 } 5968 pciide_print_modes(cp); 5969 } 5970 5971 int 5972 hpt_pci_intr(void *arg) 5973 { 5974 struct pciide_softc *sc = arg; 5975 struct pciide_channel *cp; 5976 struct channel_softc *wdc_cp; 5977 int rv = 0; 5978 int dmastat, i, crv; 5979 5980 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5981 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5982 IDEDMA_CTL(i)); 5983 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 5984 IDEDMA_CTL_INTR) 5985 continue; 5986 cp = &sc->pciide_channels[i]; 5987 wdc_cp = &cp->wdc_channel; 5988 crv = wdcintr(wdc_cp); 5989 if (crv == 0) { 5990 printf("%s:%d: bogus intr\n", 5991 sc->sc_wdcdev.sc_dev.dv_xname, i); 5992 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5993 IDEDMA_CTL(i), dmastat); 5994 } else 5995 rv = 1; 5996 } 5997 return (rv); 5998 } 5999 6000 /* Macros to test product */ 6001 #define PDC_IS_262(sc) \ 6002 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 6003 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6004 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 6005 #define PDC_IS_265(sc) \ 6006 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6007 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 6008 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6009 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6010 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6011 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6012 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6013 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6014 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6015 #define PDC_IS_268(sc) \ 6016 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6017 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6018 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6019 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6020 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6021 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6022 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6023 #define PDC_IS_269(sc) \ 6024 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6025 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6026 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6027 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6028 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6029 6030 u_int8_t 6031 pdc268_config_read(struct channel_softc *chp, int index) 6032 { 6033 struct pciide_channel *cp = (struct pciide_channel *)chp; 6034 struct 
pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6035 int channel = chp->channel; 6036 6037 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6038 PDC268_INDEX(channel), index); 6039 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6040 PDC268_DATA(channel))); 6041 } 6042 6043 void 6044 pdc202xx_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6045 { 6046 struct pciide_channel *cp; 6047 int channel; 6048 pcireg_t interface, st, mode; 6049 bus_size_t cmdsize, ctlsize; 6050 6051 if (!PDC_IS_268(sc)) { 6052 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6053 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 6054 st), DEBUG_PROBE); 6055 } 6056 6057 /* turn off RAID mode */ 6058 if (!PDC_IS_268(sc)) 6059 st &= ~PDC2xx_STATE_IDERAID; 6060 6061 /* 6062 * can't rely on the PCI_CLASS_REG content if the chip was in RAID 6063 * mode. We have to fake the interface. 6064 */ 6065 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 6066 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 6067 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 6068 6069 printf(": DMA"); 6070 pciide_mapreg_dma(sc, pa); 6071 6072 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6073 WDC_CAPABILITY_MODE; 6074 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 6075 PDC_IS_262(sc)) 6076 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 6077 if (sc->sc_dma_ok) { 6078 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6079 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6080 sc->sc_wdcdev.irqack = pciide_irqack; 6081 } 6082 sc->sc_wdcdev.PIO_cap = 4; 6083 sc->sc_wdcdev.DMA_cap = 2; 6084 if (PDC_IS_269(sc)) 6085 sc->sc_wdcdev.UDMA_cap = 6; 6086 else if (PDC_IS_265(sc)) 6087 sc->sc_wdcdev.UDMA_cap = 5; 6088 else if (PDC_IS_262(sc)) 6089 sc->sc_wdcdev.UDMA_cap = 4; 6090 else 6091 sc->sc_wdcdev.UDMA_cap = 2; 6092 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
6093 pdc20268_setup_channel : pdc202xx_setup_channel; 6094 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6095 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 6096 6097 if (PDC_IS_262(sc)) { 6098 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 6099 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 6100 } 6101 6102 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 6103 if (!PDC_IS_268(sc)) { 6104 /* setup failsafe defaults */ 6105 mode = 0; 6106 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 6107 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 6108 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 6109 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 6110 for (channel = 0; 6111 channel < sc->sc_wdcdev.nchannels; 6112 channel++) { 6113 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6114 "drive 0 initial timings 0x%x, now 0x%x\n", 6115 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6116 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 6117 DEBUG_PROBE); 6118 pci_conf_write(sc->sc_pc, sc->sc_tag, 6119 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 6120 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6121 "drive 1 initial timings 0x%x, now 0x%x\n", 6122 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6123 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 6124 pci_conf_write(sc->sc_pc, sc->sc_tag, 6125 PDC2xx_TIM(channel, 1), mode); 6126 } 6127 6128 mode = PDC2xx_SCR_DMA; 6129 if (PDC_IS_262(sc)) { 6130 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 6131 } else { 6132 /* the BIOS set it up this way */ 6133 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 6134 } 6135 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 6136 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 6137 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 6138 "now 0x%x\n", 6139 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6140 PDC2xx_SCR), 6141 mode), DEBUG_PROBE); 6142 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6143 PDC2xx_SCR, mode); 6144 6145 /* controller initial state register is OK even without BIOS */ 6146 /* Set DMA mode to IDE DMA compatibility */ 6147 mode = 6148 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 6149 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 6150 DEBUG_PROBE); 6151 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 6152 mode | 0x1); 6153 mode = 6154 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 6155 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 6156 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 6157 mode | 0x1); 6158 } 6159 6160 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6161 cp = &sc->pciide_channels[channel]; 6162 if (pciide_chansetup(sc, channel, interface) == 0) 6163 continue; 6164 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 6165 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 6166 printf("%s: %s ignored (disabled)\n", 6167 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6168 continue; 6169 } 6170 pciide_map_compat_intr(pa, cp, channel, interface); 6171 if (cp->hw_ok == 0) 6172 continue; 6173 if (PDC_IS_265(sc)) 6174 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6175 pdc20265_pci_intr); 6176 else 6177 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6178 pdc202xx_pci_intr); 6179 if (cp->hw_ok == 0) { 6180 pciide_unmap_compat_intr(pa, cp, channel, interface); 6181 continue; 6182 } 6183 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 6184 st &= ~(PDC_IS_262(sc) ? 
6185 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 6186 pciide_unmap_compat_intr(pa, cp, channel, interface); 6187 } 6188 if (PDC_IS_268(sc)) 6189 pdc20268_setup_channel(&cp->wdc_channel); 6190 else 6191 pdc202xx_setup_channel(&cp->wdc_channel); 6192 } 6193 if (!PDC_IS_268(sc)) { 6194 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 6195 "0x%x\n", st), DEBUG_PROBE); 6196 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 6197 } 6198 return; 6199 } 6200 6201 void 6202 pdc202xx_setup_channel(struct channel_softc *chp) 6203 { 6204 struct ata_drive_datas *drvp; 6205 int drive; 6206 pcireg_t mode, st; 6207 u_int32_t idedma_ctl, scr, atapi; 6208 struct pciide_channel *cp = (struct pciide_channel *)chp; 6209 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6210 int channel = chp->channel; 6211 6212 /* setup DMA if needed */ 6213 pciide_channel_dma_setup(cp); 6214 6215 idedma_ctl = 0; 6216 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 6217 sc->sc_wdcdev.sc_dev.dv_xname, 6218 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 6219 DEBUG_PROBE); 6220 6221 /* Per channel settings */ 6222 if (PDC_IS_262(sc)) { 6223 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6224 PDC262_U66); 6225 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6226 /* Check cable */ 6227 if ((st & PDC262_STATE_80P(channel)) != 0 && 6228 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6229 chp->ch_drive[0].UDMA_mode > 2) || 6230 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6231 chp->ch_drive[1].UDMA_mode > 2))) { 6232 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 6233 sc->sc_wdcdev.sc_dev.dv_xname, channel), 6234 DEBUG_PROBE); 6235 if (chp->ch_drive[0].UDMA_mode > 2) 6236 chp->ch_drive[0].UDMA_mode = 2; 6237 if (chp->ch_drive[1].UDMA_mode > 2) 6238 chp->ch_drive[1].UDMA_mode = 2; 6239 } 6240 /* Trim UDMA mode */ 6241 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6242 chp->ch_drive[0].UDMA_mode <= 2) || 6243 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6244 chp->ch_drive[1].UDMA_mode <= 2)) { 6245 if (chp->ch_drive[0].UDMA_mode > 2) 6246 chp->ch_drive[0].UDMA_mode = 2; 6247 if (chp->ch_drive[1].UDMA_mode > 2) 6248 chp->ch_drive[1].UDMA_mode = 2; 6249 } 6250 /* Set U66 if needed */ 6251 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6252 chp->ch_drive[0].UDMA_mode > 2) || 6253 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6254 chp->ch_drive[1].UDMA_mode > 2)) 6255 scr |= PDC262_U66_EN(channel); 6256 else 6257 scr &= ~PDC262_U66_EN(channel); 6258 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6259 PDC262_U66, scr); 6260 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 6261 sc->sc_wdcdev.sc_dev.dv_xname, channel, 6262 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6263 PDC262_ATAPI(channel))), DEBUG_PROBE); 6264 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 6265 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 6266 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6267 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6268 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 6269 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6270 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6271 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 6272 atapi = 0; 6273 else 6274 atapi = PDC262_ATAPI_UDMA; 6275 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6276 PDC262_ATAPI(channel), atapi); 6277 } 6278 } 6279 for (drive = 0; drive < 2; drive++) { 6280 drvp = &chp->ch_drive[drive]; 6281 /* If no drive, skip */ 6282 if ((drvp->drive_flags & 
DRIVE) == 0) 6283 continue; 6284 mode = 0; 6285 if (drvp->drive_flags & DRIVE_UDMA) { 6286 /* use Ultra/DMA */ 6287 drvp->drive_flags &= ~DRIVE_DMA; 6288 mode = PDC2xx_TIM_SET_MB(mode, 6289 pdc2xx_udma_mb[drvp->UDMA_mode]); 6290 mode = PDC2xx_TIM_SET_MC(mode, 6291 pdc2xx_udma_mc[drvp->UDMA_mode]); 6292 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6293 } else if (drvp->drive_flags & DRIVE_DMA) { 6294 mode = PDC2xx_TIM_SET_MB(mode, 6295 pdc2xx_dma_mb[drvp->DMA_mode]); 6296 mode = PDC2xx_TIM_SET_MC(mode, 6297 pdc2xx_dma_mc[drvp->DMA_mode]); 6298 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6299 } else { 6300 mode = PDC2xx_TIM_SET_MB(mode, 6301 pdc2xx_dma_mb[0]); 6302 mode = PDC2xx_TIM_SET_MC(mode, 6303 pdc2xx_dma_mc[0]); 6304 } 6305 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 6306 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 6307 if (drvp->drive_flags & DRIVE_ATA) 6308 mode |= PDC2xx_TIM_PRE; 6309 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 6310 if (drvp->PIO_mode >= 3) { 6311 mode |= PDC2xx_TIM_IORDY; 6312 if (drive == 0) 6313 mode |= PDC2xx_TIM_IORDYp; 6314 } 6315 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 6316 "timings 0x%x\n", 6317 sc->sc_wdcdev.sc_dev.dv_xname, 6318 chp->channel, drive, mode), DEBUG_PROBE); 6319 pci_conf_write(sc->sc_pc, sc->sc_tag, 6320 PDC2xx_TIM(chp->channel, drive), mode); 6321 } 6322 if (idedma_ctl != 0) { 6323 /* Add software bits in status register */ 6324 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6325 IDEDMA_CTL(channel), idedma_ctl); 6326 } 6327 pciide_print_modes(cp); 6328 } 6329 6330 void 6331 pdc20268_setup_channel(struct channel_softc *chp) 6332 { 6333 struct ata_drive_datas *drvp; 6334 int drive, cable; 6335 u_int32_t idedma_ctl; 6336 struct pciide_channel *cp = (struct pciide_channel *)chp; 6337 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6338 int channel = chp->channel; 6339 6340 /* check for an 80-pin cable */ 6341 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE; 6342 6343 /* setup DMA if needed */ 6344 pciide_channel_dma_setup(cp); 6345 6346 idedma_ctl = 0; 6347 6348 for (drive = 0; drive < 2; drive++) { 6349 drvp = &chp->ch_drive[drive]; 6350 /* If no drive, skip */ 6351 if ((drvp->drive_flags & DRIVE) == 0) 6352 continue; 6353 if (drvp->drive_flags & DRIVE_UDMA) { 6354 /* use Ultra/DMA */ 6355 drvp->drive_flags &= ~DRIVE_DMA; 6356 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6357 if (cable && drvp->UDMA_mode > 2) { 6358 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6359 "cable not detected\n", drvp->drive_name, 6360 sc->sc_wdcdev.sc_dev.dv_xname, 6361 channel, drive), DEBUG_PROBE); 6362 drvp->UDMA_mode = 2; 6363 } 6364 } else if (drvp->drive_flags & DRIVE_DMA) { 6365 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6366 } 6367 } 6368 /* nothing to do to set up modes; the controller snoops the SET_FEATURES command */ 6369 if (idedma_ctl != 0) { 6370 /* Add software bits in status register */ 6371 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6372 IDEDMA_CTL(channel), idedma_ctl); 6373 } 6374 pciide_print_modes(cp); 6375 } 6376 6377 int 6378 pdc202xx_pci_intr(void *arg) 6379 { 6380 struct pciide_softc *sc = arg; 6381 struct pciide_channel *cp; 6382 struct channel_softc *wdc_cp; 6383 int i, rv, crv; 6384 u_int32_t scr; 6385 6386 rv = 0; 6387 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 6388 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6389 cp = &sc->pciide_channels[i]; 6390 wdc_cp = &cp->wdc_channel; 6391 /* If a compat channel skip.
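* Compat channels get their interrupts through the compat interrupt
* mapping set up at attach time, so they are not serviced here.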
*/ 6392 if (cp->compat) 6393 continue; 6394 if (scr & PDC2xx_SCR_INT(i)) { 6395 crv = wdcintr(wdc_cp); 6396 if (crv == 0) 6397 printf("%s:%d: bogus intr (reg 0x%x)\n", 6398 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 6399 else 6400 rv = 1; 6401 } 6402 } 6403 return (rv); 6404 } 6405 6406 int 6407 pdc20265_pci_intr(void *arg) 6408 { 6409 struct pciide_softc *sc = arg; 6410 struct pciide_channel *cp; 6411 struct channel_softc *wdc_cp; 6412 int i, rv, crv; 6413 u_int32_t dmastat; 6414 6415 rv = 0; 6416 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6417 cp = &sc->pciide_channels[i]; 6418 wdc_cp = &cp->wdc_channel; 6419 /* If a compat channel skip. */ 6420 if (cp->compat) 6421 continue; 6422 6423 /* 6424 * In case of shared IRQ check that the interrupt 6425 * was actually generated by this channel. 6426 * Only check the channel that is enabled. 6427 */ 6428 if (cp->hw_ok && PDC_IS_268(sc)) { 6429 if ((pdc268_config_read(wdc_cp, 6430 0x0b) & PDC268_INTR) == 0) 6431 continue; 6432 } 6433 6434 /* 6435 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 6436 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 6437 * So use it instead (requires 2 reg reads instead of 1, 6438 * but we can't do it another way). 6439 */ 6440 dmastat = bus_space_read_1(sc->sc_dma_iot, 6441 sc->sc_dma_ioh, IDEDMA_CTL(i)); 6442 if ((dmastat & IDEDMA_CTL_INTR) == 0) 6443 continue; 6444 6445 crv = wdcintr(wdc_cp); 6446 if (crv == 0) 6447 printf("%s:%d: bogus intr\n", 6448 sc->sc_wdcdev.sc_dev.dv_xname, i); 6449 else 6450 rv = 1; 6451 } 6452 return (rv); 6453 } 6454 6455 void 6456 pdc20262_dma_start(void *v, int channel, int drive) 6457 { 6458 struct pciide_softc *sc = v; 6459 struct pciide_dma_maps *dma_maps = 6460 &sc->pciide_channels[channel].dma_maps[drive]; 6461 u_int8_t clock; 6462 u_int32_t count; 6463 6464 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6465 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6466 PDC262_U66); 6467 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6468 PDC262_U66, clock | PDC262_U66_EN(channel)); 6469 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 6470 count |= dma_maps->dma_flags & WDC_DMA_READ ? 
6471 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 6472 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6473 PDC262_ATAPI(channel), count); 6474 } 6475 6476 pciide_dma_start(v, channel, drive); 6477 } 6478 6479 int 6480 pdc20262_dma_finish(void *v, int channel, int drive, int force) 6481 { 6482 struct pciide_softc *sc = v; 6483 struct pciide_dma_maps *dma_maps = 6484 &sc->pciide_channels[channel].dma_maps[drive]; 6485 u_int8_t clock; 6486 6487 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6488 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6489 PDC262_U66); 6490 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6491 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 6492 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6493 PDC262_ATAPI(channel), 0); 6494 } 6495 6496 return (pciide_dma_finish(v, channel, drive, force)); 6497 } 6498 6499 void 6500 pdcsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6501 { 6502 struct pciide_channel *cp; 6503 struct channel_softc *wdc_cp; 6504 struct pciide_pdcsata *ps; 6505 int channel, i; 6506 bus_size_t dmasize; 6507 pci_intr_handle_t intrhandle; 6508 const char *intrstr; 6509 6510 /* Allocate memory for private data */ 6511 sc->sc_cookie = malloc(sizeof(*ps), M_DEVBUF, M_NOWAIT | M_ZERO); 6512 ps = sc->sc_cookie; 6513 6514 /* 6515 * Promise SATA controllers have 3 or 4 channels, 6516 * the usual IDE registers are mapped in I/O space, with offsets. 6517 */ 6518 if (pci_intr_map(pa, &intrhandle) != 0) { 6519 printf(": couldn't map interrupt\n"); 6520 return; 6521 } 6522 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 6523 6524 switch (sc->sc_pp->ide_product) { 6525 case PCI_PRODUCT_PROMISE_PDC20318: 6526 case PCI_PRODUCT_PROMISE_PDC20319: 6527 case PCI_PRODUCT_PROMISE_PDC20371: 6528 case PCI_PRODUCT_PROMISE_PDC20375: 6529 case PCI_PRODUCT_PROMISE_PDC20376: 6530 case PCI_PRODUCT_PROMISE_PDC20377: 6531 case PCI_PRODUCT_PROMISE_PDC20378: 6532 case PCI_PRODUCT_PROMISE_PDC20379: 6533 default: 6534 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6535 intrhandle, IPL_BIO, pdc203xx_pci_intr, sc, 6536 sc->sc_wdcdev.sc_dev.dv_xname); 6537 break; 6538 6539 case PCI_PRODUCT_PROMISE_PDC40518: 6540 case PCI_PRODUCT_PROMISE_PDC40519: 6541 case PCI_PRODUCT_PROMISE_PDC40718: 6542 case PCI_PRODUCT_PROMISE_PDC40719: 6543 case PCI_PRODUCT_PROMISE_PDC40779: 6544 case PCI_PRODUCT_PROMISE_PDC20571: 6545 case PCI_PRODUCT_PROMISE_PDC20575: 6546 case PCI_PRODUCT_PROMISE_PDC20579: 6547 case PCI_PRODUCT_PROMISE_PDC20771: 6548 case PCI_PRODUCT_PROMISE_PDC20775: 6549 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6550 intrhandle, IPL_BIO, pdc205xx_pci_intr, sc, 6551 sc->sc_wdcdev.sc_dev.dv_xname); 6552 break; 6553 } 6554 6555 if (sc->sc_pci_ih == NULL) { 6556 printf(": couldn't establish native-PCI interrupt"); 6557 if (intrstr != NULL) 6558 printf(" at %s", intrstr); 6559 printf("\n"); 6560 return; 6561 } 6562 6563 sc->sc_dma_ok = (pci_mapreg_map(pa, PCIIDE_REG_BUS_MASTER_DMA, 6564 PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_dma_iot, 6565 &sc->sc_dma_ioh, NULL, &dmasize, 0) == 0); 6566 if (!sc->sc_dma_ok) { 6567 printf(": couldn't map bus-master DMA registers\n"); 6568 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6569 return; 6570 } 6571 6572 sc->sc_dmat = pa->pa_dmat; 6573 6574 if (pci_mapreg_map(pa, PDC203xx_BAR_IDEREGS, 6575 PCI_MAPREG_MEM_TYPE_32BIT, 0, &ps->ba5_st, 6576 &ps->ba5_sh, NULL, NULL, 0) != 0) { 6577 printf(": couldn't map IDE registers\n"); 6578 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, dmasize); 6579 pci_intr_disestablish(pa->pa_pc, 
sc->sc_pci_ih); 6580 return; 6581 } 6582 6583 printf(": DMA\n"); 6584 6585 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 6586 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6587 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6588 sc->sc_wdcdev.irqack = pdc203xx_irqack; 6589 sc->sc_wdcdev.PIO_cap = 4; 6590 sc->sc_wdcdev.DMA_cap = 2; 6591 sc->sc_wdcdev.UDMA_cap = 6; 6592 sc->sc_wdcdev.set_modes = pdc203xx_setup_channel; 6593 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6594 6595 switch (sc->sc_pp->ide_product) { 6596 case PCI_PRODUCT_PROMISE_PDC20318: 6597 case PCI_PRODUCT_PROMISE_PDC20319: 6598 case PCI_PRODUCT_PROMISE_PDC20371: 6599 case PCI_PRODUCT_PROMISE_PDC20375: 6600 case PCI_PRODUCT_PROMISE_PDC20376: 6601 case PCI_PRODUCT_PROMISE_PDC20377: 6602 case PCI_PRODUCT_PROMISE_PDC20378: 6603 case PCI_PRODUCT_PROMISE_PDC20379: 6604 default: 6605 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x06c, 0x00ff0033); 6606 sc->sc_wdcdev.nchannels = 6607 (bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x48) & 0x02) ? 6608 PDC203xx_NCHANNELS : 3; 6609 break; 6610 6611 case PCI_PRODUCT_PROMISE_PDC40518: 6612 case PCI_PRODUCT_PROMISE_PDC40519: 6613 case PCI_PRODUCT_PROMISE_PDC40718: 6614 case PCI_PRODUCT_PROMISE_PDC40719: 6615 case PCI_PRODUCT_PROMISE_PDC40779: 6616 case PCI_PRODUCT_PROMISE_PDC20571: 6617 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6618 sc->sc_wdcdev.nchannels = PDC40718_NCHANNELS; 6619 6620 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6621 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6622 6623 break; 6624 case PCI_PRODUCT_PROMISE_PDC20575: 6625 case PCI_PRODUCT_PROMISE_PDC20579: 6626 case PCI_PRODUCT_PROMISE_PDC20771: 6627 case PCI_PRODUCT_PROMISE_PDC20775: 6628 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6629 sc->sc_wdcdev.nchannels = PDC20575_NCHANNELS; 6630 6631 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6632 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6633 6634 break; 6635 } 6636 6637 sc->sc_wdcdev.dma_arg = sc; 6638 sc->sc_wdcdev.dma_init = pciide_dma_init; 6639 sc->sc_wdcdev.dma_start = pdc203xx_dma_start; 6640 sc->sc_wdcdev.dma_finish = pdc203xx_dma_finish; 6641 6642 for (channel = 0; channel < sc->sc_wdcdev.nchannels; 6643 channel++) { 6644 cp = &sc->pciide_channels[channel]; 6645 sc->wdc_chanarray[channel] = &cp->wdc_channel; 6646 6647 cp->ih = sc->sc_pci_ih; 6648 cp->name = NULL; 6649 cp->wdc_channel.channel = channel; 6650 cp->wdc_channel.wdc = &sc->sc_wdcdev; 6651 cp->wdc_channel.ch_queue = 6652 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 6653 if (cp->wdc_channel.ch_queue == NULL) { 6654 printf("%s: channel %d: " 6655 "can't allocate memory for command queue\n", 6656 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6657 continue; 6658 } 6659 wdc_cp = &cp->wdc_channel; 6660 6661 ps->regs[channel].ctl_iot = ps->ba5_st; 6662 ps->regs[channel].cmd_iot = ps->ba5_st; 6663 6664 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6665 0x0238 + (channel << 7), 1, 6666 &ps->regs[channel].ctl_ioh) != 0) { 6667 printf("%s: couldn't map channel %d ctl regs\n", 6668 sc->sc_wdcdev.sc_dev.dv_xname, 6669 channel); 6670 continue; 6671 } 6672 for (i = 0; i < WDC_NREG; i++) { 6673 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6674 0x0200 + (i << 2) + (channel << 7), i == 0 ? 
4 : 1, 6675 &ps->regs[channel].cmd_iohs[i]) != 0) { 6676 printf("%s: couldn't map channel %d cmd " 6677 "regs\n", 6678 sc->sc_wdcdev.sc_dev.dv_xname, 6679 channel); 6680 continue; 6681 } 6682 } 6683 ps->regs[channel].cmd_iohs[wdr_status & _WDC_REGMASK] = 6684 ps->regs[channel].cmd_iohs[wdr_command & _WDC_REGMASK]; 6685 ps->regs[channel].cmd_iohs[wdr_features & _WDC_REGMASK] = 6686 ps->regs[channel].cmd_iohs[wdr_error & _WDC_REGMASK]; 6687 wdc_cp->data32iot = wdc_cp->cmd_iot = 6688 ps->regs[channel].cmd_iot; 6689 wdc_cp->data32ioh = wdc_cp->cmd_ioh = 6690 ps->regs[channel].cmd_iohs[0]; 6691 wdc_cp->_vtbl = &wdc_pdc203xx_vtbl; 6692 6693 /* 6694 * Subregion the busmaster registers. They're spread all over 6695 * the controller's register space :(. They are also 4 bytes 6696 * wide, with some specific extensions in the extra bits. 6697 * It also seems that the IDEDMA_CTL register isn't available. 6698 */ 6699 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6700 0x260 + (channel << 7), 1, 6701 &ps->regs[channel].dma_iohs[IDEDMA_CMD(0)]) != 0) { 6702 printf("%s channel %d: can't subregion DMA " 6703 "registers\n", 6704 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6705 continue; 6706 } 6707 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6708 0x244 + (channel << 7), 4, 6709 &ps->regs[channel].dma_iohs[IDEDMA_TBL(0)]) != 0) { 6710 printf("%s channel %d: can't subregion DMA " 6711 "registers\n", 6712 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6713 continue; 6714 } 6715 6716 wdcattach(wdc_cp); 6717 bus_space_write_4(sc->sc_dma_iot, 6718 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6719 (bus_space_read_4(sc->sc_dma_iot, 6720 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6721 0) & ~0x00003f9f) | (channel + 1)); 6722 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 6723 (channel + 1) << 2, 0x00000001); 6724 6725 pdc203xx_setup_channel(&cp->wdc_channel); 6726 } 6727 6728 printf("%s: using %s for native-PCI interrupt\n", 6729 sc->sc_wdcdev.sc_dev.dv_xname, 6730 intrstr ?
intrstr : "unknown interrupt"); 6731 } 6732 6733 void 6734 pdc203xx_setup_channel(struct channel_softc *chp) 6735 { 6736 struct ata_drive_datas *drvp; 6737 struct pciide_channel *cp = (struct pciide_channel *)chp; 6738 int drive, s; 6739 6740 pciide_channel_dma_setup(cp); 6741 6742 for (drive = 0; drive < 2; drive++) { 6743 drvp = &chp->ch_drive[drive]; 6744 if ((drvp->drive_flags & DRIVE) == 0) 6745 continue; 6746 if (drvp->drive_flags & DRIVE_UDMA) { 6747 s = splbio(); 6748 drvp->drive_flags &= ~DRIVE_DMA; 6749 splx(s); 6750 } 6751 } 6752 pciide_print_modes(cp); 6753 } 6754 6755 int 6756 pdc203xx_pci_intr(void *arg) 6757 { 6758 struct pciide_softc *sc = arg; 6759 struct pciide_channel *cp; 6760 struct channel_softc *wdc_cp; 6761 struct pciide_pdcsata *ps = sc->sc_cookie; 6762 int i, rv, crv; 6763 u_int32_t scr; 6764 6765 rv = 0; 6766 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x00040); 6767 6768 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6769 cp = &sc->pciide_channels[i]; 6770 wdc_cp = &cp->wdc_channel; 6771 if (scr & (1 << (i + 1))) { 6772 crv = wdcintr(wdc_cp); 6773 if (crv == 0) { 6774 printf("%s:%d: bogus intr (reg 0x%x)\n", 6775 sc->sc_wdcdev.sc_dev.dv_xname, 6776 i, scr); 6777 } else 6778 rv = 1; 6779 } 6780 } 6781 6782 return (rv); 6783 } 6784 6785 int 6786 pdc205xx_pci_intr(void *arg) 6787 { 6788 struct pciide_softc *sc = arg; 6789 struct pciide_channel *cp; 6790 struct channel_softc *wdc_cp; 6791 struct pciide_pdcsata *ps = sc->sc_cookie; 6792 int i, rv, crv; 6793 u_int32_t scr, status; 6794 6795 rv = 0; 6796 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x40); 6797 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x40, scr & 0x0000ffff); 6798 6799 status = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x60); 6800 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, status & 0x000000ff); 6801 6802 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6803 cp = &sc->pciide_channels[i]; 6804 wdc_cp = &cp->wdc_channel; 6805 if (scr & (1 << (i + 1))) { 6806 crv = wdcintr(wdc_cp); 6807 if (crv == 0) { 6808 printf("%s:%d: bogus intr (reg 0x%x)\n", 6809 sc->sc_wdcdev.sc_dev.dv_xname, 6810 i, scr); 6811 } else 6812 rv = 1; 6813 } 6814 } 6815 return rv; 6816 } 6817 6818 void 6819 pdc203xx_irqack(struct channel_softc *chp) 6820 { 6821 struct pciide_channel *cp = (struct pciide_channel *)chp; 6822 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6823 struct pciide_pdcsata *ps = sc->sc_cookie; 6824 int chan = chp->channel; 6825 6826 bus_space_write_4(sc->sc_dma_iot, 6827 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, 6828 (bus_space_read_4(sc->sc_dma_iot, 6829 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 6830 0) & ~0x00003f9f) | (chan + 1)); 6831 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 6832 (chan + 1) << 2, 0x00000001); 6833 } 6834 6835 void 6836 pdc203xx_dma_start(void *v, int channel, int drive) 6837 { 6838 struct pciide_softc *sc = v; 6839 struct pciide_channel *cp = &sc->pciide_channels[channel]; 6840 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 6841 struct pciide_pdcsata *ps = sc->sc_cookie; 6842 6843 /* Write table address */ 6844 bus_space_write_4(sc->sc_dma_iot, 6845 ps->regs[channel].dma_iohs[IDEDMA_TBL(0)], 0, 6846 dma_maps->dmamap_table->dm_segs[0].ds_addr); 6847 6848 /* Start DMA engine */ 6849 bus_space_write_4(sc->sc_dma_iot, 6850 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6851 (bus_space_read_4(sc->sc_dma_iot, 6852 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6853 0) & ~0xc0) | ((dma_maps->dma_flags & WDC_DMA_READ) ? 
0x80 : 0xc0)); 6854 } 6855 6856 int 6857 pdc203xx_dma_finish(void *v, int channel, int drive, int force) 6858 { 6859 struct pciide_softc *sc = v; 6860 struct pciide_channel *cp = &sc->pciide_channels[channel]; 6861 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 6862 struct pciide_pdcsata *ps = sc->sc_cookie; 6863 6864 /* Stop DMA channel */ 6865 bus_space_write_4(sc->sc_dma_iot, 6866 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6867 (bus_space_read_4(sc->sc_dma_iot, 6868 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6869 0) & ~0x80)); 6870 6871 /* Unload the map of the data buffer */ 6872 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 6873 dma_maps->dmamap_xfer->dm_mapsize, 6874 (dma_maps->dma_flags & WDC_DMA_READ) ? 6875 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 6876 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 6877 6878 return (0); 6879 } 6880 6881 u_int8_t 6882 pdc203xx_read_reg(struct channel_softc *chp, enum wdc_regs reg) 6883 { 6884 struct pciide_channel *cp = (struct pciide_channel *)chp; 6885 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6886 struct pciide_pdcsata *ps = sc->sc_cookie; 6887 u_int8_t val; 6888 6889 if (reg & _WDC_AUX) { 6890 return (bus_space_read_1(ps->regs[chp->channel].ctl_iot, 6891 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 6892 } else { 6893 val = bus_space_read_1(ps->regs[chp->channel].cmd_iot, 6894 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0); 6895 return (val); 6896 } 6897 } 6898 6899 void 6900 pdc203xx_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 6901 { 6902 struct pciide_channel *cp = (struct pciide_channel *)chp; 6903 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6904 struct pciide_pdcsata *ps = sc->sc_cookie; 6905 6906 if (reg & _WDC_AUX) 6907 bus_space_write_1(ps->regs[chp->channel].ctl_iot, 6908 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 6909 else 6910 bus_space_write_1(ps->regs[chp->channel].cmd_iot, 6911 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 6912 0, val); 6913 } 6914 6915 void 6916 pdc205xx_do_reset(struct channel_softc *chp) 6917 { 6918 struct pciide_channel *cp = (struct pciide_channel *)chp; 6919 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6920 struct pciide_pdcsata *ps = sc->sc_cookie; 6921 u_int32_t scontrol; 6922 6923 wdc_do_reset(chp); 6924 6925 /* reset SATA */ 6926 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 6927 SCONTROL_WRITE(ps, chp->channel, scontrol); 6928 delay(50*1000); 6929 6930 scontrol &= ~SControl_DET_INIT; 6931 SCONTROL_WRITE(ps, chp->channel, scontrol); 6932 delay(50*1000); 6933 } 6934 6935 void 6936 pdc205xx_drv_probe(struct channel_softc *chp) 6937 { 6938 struct pciide_channel *cp = (struct pciide_channel *)chp; 6939 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6940 struct pciide_pdcsata *ps = sc->sc_cookie; 6941 bus_space_handle_t *iohs; 6942 u_int32_t scontrol, sstatus; 6943 u_int16_t scnt, sn, cl, ch; 6944 int i, s; 6945 6946 /* XXX This should be done by other code. 
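* (Fill in the per-drive back pointers here so the probe code below can
* use ch_drive[]; normally the generic attach code sets these up.)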
*/ 6947 for (i = 0; i < 2; i++) { 6948 chp->ch_drive[i].chnl_softc = chp; 6949 chp->ch_drive[i].drive = i; 6950 } 6951 6952 SCONTROL_WRITE(ps, chp->channel, 0); 6953 delay(50*1000); 6954 6955 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 6956 SCONTROL_WRITE(ps,chp->channel,scontrol); 6957 delay(50*1000); 6958 6959 scontrol &= ~SControl_DET_INIT; 6960 SCONTROL_WRITE(ps,chp->channel,scontrol); 6961 delay(50*1000); 6962 6963 sstatus = SSTATUS_READ(ps,chp->channel); 6964 6965 switch (sstatus & SStatus_DET_mask) { 6966 case SStatus_DET_NODEV: 6967 /* No Device; be silent. */ 6968 break; 6969 6970 case SStatus_DET_DEV_NE: 6971 printf("%s: port %d: device connected, but " 6972 "communication not established\n", 6973 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6974 break; 6975 6976 case SStatus_DET_OFFLINE: 6977 printf("%s: port %d: PHY offline\n", 6978 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6979 break; 6980 6981 case SStatus_DET_DEV: 6982 iohs = ps->regs[chp->channel].cmd_iohs; 6983 bus_space_write_1(chp->cmd_iot, iohs[wdr_sdh], 0, 6984 WDSD_IBM); 6985 delay(10); /* 400ns delay */ 6986 scnt = bus_space_read_2(chp->cmd_iot, iohs[wdr_seccnt], 0); 6987 sn = bus_space_read_2(chp->cmd_iot, iohs[wdr_sector], 0); 6988 cl = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_lo], 0); 6989 ch = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_hi], 0); 6990 #if 0 6991 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 6992 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 6993 scnt, sn, cl, ch); 6994 #endif 6995 /* 6996 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 6997 * cases we get wrong values here, so ignore it. 6998 */ 6999 s = splbio(); 7000 if (cl == 0x14 && ch == 0xeb) 7001 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7002 else 7003 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7004 splx(s); 7005 #if 0 7006 printf("%s: port %d: device present", 7007 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7008 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7009 case 1: 7010 printf(", speed: 1.5Gb/s"); 7011 break; 7012 case 2: 7013 printf(", speed: 3.0Gb/s"); 7014 break; 7015 } 7016 printf("\n"); 7017 #endif 7018 break; 7019 7020 default: 7021 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7022 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7023 } 7024 } 7025 7026 #ifdef notyet 7027 /* 7028 * Inline functions for accessing the timing registers of the 7029 * OPTi controller. 7030 * 7031 * These *MUST* disable interrupts as they need atomic access to 7032 * certain magic registers. Failure to adhere to this *will* 7033 * break things in subtle ways if the wdc registers are accessed 7034 * by an interrupt routine while this magic sequence is executing. 
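* The sequence used below is: two 16-bit reads of the features register,
* an 8-bit write of 0x03 to the sector count register to expose the
* configuration registers, the actual configuration access, and finally
* a write of 0x83 to restore the normal register set.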
7035 */ 7036 static __inline__ u_int8_t 7037 opti_read_config(struct channel_softc *chp, int reg) 7038 { 7039 u_int8_t rv; 7040 int s = splhigh(); 7041 7042 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7043 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7044 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7045 7046 /* Followed by an 8-bit write of 0x3 to register #2 */ 7047 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7048 7049 /* Now we can read the required register */ 7050 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 7051 7052 /* Restore the real registers */ 7053 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7054 7055 splx(s); 7056 7057 return (rv); 7058 } 7059 7060 static __inline__ void 7061 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 7062 { 7063 int s = splhigh(); 7064 7065 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7066 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7067 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7068 7069 /* Followed by an 8-bit write of 0x3 to register #2 */ 7070 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7071 7072 /* Now we can write the required register */ 7073 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 7074 7075 /* Restore the real registers */ 7076 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7077 7078 splx(s); 7079 } 7080 7081 void 7082 opti_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7083 { 7084 struct pciide_channel *cp; 7085 bus_size_t cmdsize, ctlsize; 7086 pcireg_t interface; 7087 u_int8_t init_ctrl; 7088 int channel; 7089 7090 printf(": DMA"); 7091 /* 7092 * XXXSCW: 7093 * There seem to be a couple of buggy revisions/implementations 7094 * of the OPTi pciide chipset. This kludge seems to fix one of 7095 * the reported problems (NetBSD PR/11644) but still fails for the 7096 * other (NetBSD PR/13151), although the latter may be due to other 7097 * issues too... 
7098 */ 7099 if (sc->sc_rev <= 0x12) { 7100 printf(" (disabled)"); 7101 sc->sc_dma_ok = 0; 7102 sc->sc_wdcdev.cap = 0; 7103 } else { 7104 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32; 7105 pciide_mapreg_dma(sc, pa); 7106 } 7107 7108 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE; 7109 sc->sc_wdcdev.PIO_cap = 4; 7110 if (sc->sc_dma_ok) { 7111 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7112 sc->sc_wdcdev.irqack = pciide_irqack; 7113 sc->sc_wdcdev.DMA_cap = 2; 7114 } 7115 sc->sc_wdcdev.set_modes = opti_setup_channel; 7116 7117 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7118 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 7119 7120 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 7121 OPTI_REG_INIT_CONTROL); 7122 7123 interface = PCI_INTERFACE(pa->pa_class); 7124 7125 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 7126 7127 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7128 cp = &sc->pciide_channels[channel]; 7129 if (pciide_chansetup(sc, channel, interface) == 0) 7130 continue; 7131 if (channel == 1 && 7132 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 7133 printf("%s: %s ignored (disabled)\n", 7134 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7135 continue; 7136 } 7137 pciide_map_compat_intr(pa, cp, channel, interface); 7138 if (cp->hw_ok == 0) 7139 continue; 7140 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7141 pciide_pci_intr); 7142 if (cp->hw_ok == 0) { 7143 pciide_unmap_compat_intr(pa, cp, channel, interface); 7144 continue; 7145 } 7146 opti_setup_channel(&cp->wdc_channel); 7147 } 7148 } 7149 7150 void 7151 opti_setup_channel(struct channel_softc *chp) 7152 { 7153 struct ata_drive_datas *drvp; 7154 struct pciide_channel *cp = (struct pciide_channel *)chp; 7155 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7156 int drive, spd; 7157 int mode[2]; 7158 u_int8_t rv, mr; 7159 7160 /* 7161 * The `Delay' and `Address Setup Time' fields of the 7162 * Miscellaneous Register are always zero initially. 7163 */ 7164 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 7165 mr &= ~(OPTI_MISC_DELAY_MASK | 7166 OPTI_MISC_ADDR_SETUP_MASK | 7167 OPTI_MISC_INDEX_MASK); 7168 7169 /* Prime the control register before setting timing values */ 7170 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 7171 7172 /* Determine the clockrate of the PCIbus the chip is attached to */ 7173 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 7174 spd &= OPTI_STRAP_PCI_SPEED_MASK; 7175 7176 /* setup DMA if needed */ 7177 pciide_channel_dma_setup(cp); 7178 7179 for (drive = 0; drive < 2; drive++) { 7180 drvp = &chp->ch_drive[drive]; 7181 /* If no drive, skip */ 7182 if ((drvp->drive_flags & DRIVE) == 0) { 7183 mode[drive] = -1; 7184 continue; 7185 } 7186 7187 if ((drvp->drive_flags & DRIVE_DMA)) { 7188 /* 7189 * Timings will be used for both PIO and DMA, 7190 * so adjust DMA mode if needed 7191 */ 7192 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 7193 drvp->PIO_mode = drvp->DMA_mode + 2; 7194 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 7195 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 7196 drvp->PIO_mode - 2 : 0; 7197 if (drvp->DMA_mode == 0) 7198 drvp->PIO_mode = 0; 7199 7200 mode[drive] = drvp->DMA_mode + 5; 7201 } else 7202 mode[drive] = drvp->PIO_mode; 7203 7204 if (drive && mode[0] >= 0 && 7205 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 7206 /* 7207 * Can't have two drives using different values 7208 * for `Address Setup Time'. 7209 * Slow down the faster drive to compensate. 
7210 */
7211 int d = (opti_tim_as[spd][mode[0]] >
7212 opti_tim_as[spd][mode[1]]) ? 0 : 1;
7213
7214 mode[d] = mode[1-d];
7215 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
7216 chp->ch_drive[d].DMA_mode = 0;
7217 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
7218 }
7219 }
7220
7221 for (drive = 0; drive < 2; drive++) {
7222 int m;
7223 if ((m = mode[drive]) < 0)
7224 continue;
7225
7226 /* Set the Address Setup Time and select appropriate index */
7227 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
7228 rv |= OPTI_MISC_INDEX(drive);
7229 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
7230
7231 /* Set the pulse width and recovery timing parameters */
7232 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
7233 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
7234 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
7235 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
7236
7237 /* Set the Enhanced Mode register appropriately */
7238 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
7239 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
7240 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
7241 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
7242 }
7243
7244 /* Finally, enable the timings */
7245 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
7246
7247 pciide_print_modes(cp);
7248 }
7249 #endif
7250
7251 void
7252 serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
7253 {
7254 struct pciide_channel *cp;
7255 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
7256 pcitag_t pcib_tag;
7257 int channel;
7258 bus_size_t cmdsize, ctlsize;
7259
7260 printf(": DMA");
7261 pciide_mapreg_dma(sc, pa);
7262 printf("\n");
7263 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
7264 WDC_CAPABILITY_MODE;
7265
7266 if (sc->sc_dma_ok) {
7267 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
7268 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
7269 sc->sc_wdcdev.irqack = pciide_irqack;
7270 }
7271 sc->sc_wdcdev.PIO_cap = 4;
7272 sc->sc_wdcdev.DMA_cap = 2;
7273 switch (sc->sc_pp->ide_product) {
7274 case PCI_PRODUCT_RCC_OSB4_IDE:
7275 sc->sc_wdcdev.UDMA_cap = 2;
7276 break;
7277 case PCI_PRODUCT_RCC_CSB5_IDE:
7278 if (sc->sc_rev < 0x92)
7279 sc->sc_wdcdev.UDMA_cap = 4;
7280 else
7281 sc->sc_wdcdev.UDMA_cap = 5;
7282 break;
7283 case PCI_PRODUCT_RCC_CSB6_IDE:
7284 sc->sc_wdcdev.UDMA_cap = 4;
7285 break;
7286 case PCI_PRODUCT_RCC_CSB6_RAID_IDE:
7287 sc->sc_wdcdev.UDMA_cap = 5;
7288 break;
7289 }
7290
7291 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
7292 sc->sc_wdcdev.channels = sc->wdc_chanarray;
7293 sc->sc_wdcdev.nchannels =
7294 (sc->sc_pp->ide_product == PCI_PRODUCT_RCC_CSB6_IDE ?
1 : 2); 7295 7296 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7297 cp = &sc->pciide_channels[channel]; 7298 if (pciide_chansetup(sc, channel, interface) == 0) 7299 continue; 7300 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7301 serverworks_pci_intr); 7302 if (cp->hw_ok == 0) 7303 return; 7304 pciide_map_compat_intr(pa, cp, channel, interface); 7305 if (cp->hw_ok == 0) 7306 return; 7307 serverworks_setup_channel(&cp->wdc_channel); 7308 } 7309 7310 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 7311 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 7312 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 7313 } 7314 7315 void 7316 serverworks_setup_channel(struct channel_softc *chp) 7317 { 7318 struct ata_drive_datas *drvp; 7319 struct pciide_channel *cp = (struct pciide_channel *)chp; 7320 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7321 int channel = chp->channel; 7322 int drive, unit; 7323 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 7324 u_int32_t idedma_ctl; 7325 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 7326 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 7327 7328 /* setup DMA if needed */ 7329 pciide_channel_dma_setup(cp); 7330 7331 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 7332 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 7333 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 7334 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 7335 7336 pio_time &= ~(0xffff << (16 * channel)); 7337 dma_time &= ~(0xffff << (16 * channel)); 7338 pio_mode &= ~(0xff << (8 * channel + 16)); 7339 udma_mode &= ~(0xff << (8 * channel + 16)); 7340 udma_mode &= ~(3 << (2 * channel)); 7341 7342 idedma_ctl = 0; 7343 7344 /* Per drive settings */ 7345 for (drive = 0; drive < 2; drive++) { 7346 drvp = &chp->ch_drive[drive]; 7347 /* If no drive, skip */ 7348 if ((drvp->drive_flags & DRIVE) == 0) 7349 continue; 7350 unit = drive + 2 * channel; 7351 /* add timing values, setup DMA if needed */ 7352 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 7353 pio_mode |= drvp->PIO_mode << (4 * unit + 16); 7354 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7355 (drvp->drive_flags & DRIVE_UDMA)) { 7356 /* use Ultra/DMA, check for 80-pin cable */ 7357 if (sc->sc_rev <= 0x92 && drvp->UDMA_mode > 2 && 7358 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 7359 PCI_SUBSYS_ID_REG)) & 7360 (1 << (14 + channel))) == 0) { 7361 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 7362 "cable not detected\n", drvp->drive_name, 7363 sc->sc_wdcdev.sc_dev.dv_xname, 7364 channel, drive), DEBUG_PROBE); 7365 drvp->UDMA_mode = 2; 7366 } 7367 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7368 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 7369 udma_mode |= 1 << unit; 7370 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7371 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7372 (drvp->drive_flags & DRIVE_DMA)) { 7373 /* use Multiword DMA */ 7374 drvp->drive_flags &= ~DRIVE_UDMA; 7375 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7376 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7377 } else { 7378 /* PIO only */ 7379 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7380 } 7381 } 7382 7383 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 7384 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 7385 if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE) 7386 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 7387 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 
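/*
 * Note on the layout programmed above: the PIO (0x40) and MWDMA (0x44)
 * timing registers hold one byte per drive, stored in reverse drive
 * order within each channel (hence the (unit ^ 1) shifts); 0x48 and
 * 0x54 hold 4-bit mode fields per unit starting at bit 16, and a
 * per-unit bit in the low byte of 0x54 is set whenever Ultra/DMA is
 * used for that unit.
 */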
7388 7389 if (idedma_ctl != 0) { 7390 /* Add software bits in status register */ 7391 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7392 IDEDMA_CTL(channel), idedma_ctl); 7393 } 7394 pciide_print_modes(cp); 7395 } 7396 7397 int 7398 serverworks_pci_intr(void *arg) 7399 { 7400 struct pciide_softc *sc = arg; 7401 struct pciide_channel *cp; 7402 struct channel_softc *wdc_cp; 7403 int rv = 0; 7404 int dmastat, i, crv; 7405 7406 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7407 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7408 IDEDMA_CTL(i)); 7409 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 7410 IDEDMA_CTL_INTR) 7411 continue; 7412 cp = &sc->pciide_channels[i]; 7413 wdc_cp = &cp->wdc_channel; 7414 crv = wdcintr(wdc_cp); 7415 if (crv == 0) { 7416 printf("%s:%d: bogus intr\n", 7417 sc->sc_wdcdev.sc_dev.dv_xname, i); 7418 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7419 IDEDMA_CTL(i), dmastat); 7420 } else 7421 rv = 1; 7422 } 7423 return (rv); 7424 } 7425 7426 void 7427 svwsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7428 { 7429 struct pciide_channel *cp; 7430 pci_intr_handle_t intrhandle; 7431 const char *intrstr; 7432 int channel; 7433 struct pciide_svwsata *ss; 7434 7435 /* Allocate memory for private data */ 7436 sc->sc_cookie = malloc(sizeof(*ss), M_DEVBUF, M_NOWAIT | M_ZERO); 7437 ss = sc->sc_cookie; 7438 7439 /* The 4-port version has a dummy second function. */ 7440 if (pci_conf_read(sc->sc_pc, sc->sc_tag, 7441 PCI_MAPREG_START + 0x14) == 0) { 7442 printf("\n"); 7443 return; 7444 } 7445 7446 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 7447 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 7448 &ss->ba5_st, &ss->ba5_sh, NULL, NULL, 0) != 0) { 7449 printf(": unable to map BA5 register space\n"); 7450 return; 7451 } 7452 7453 printf(": DMA"); 7454 svwsata_mapreg_dma(sc, pa); 7455 printf("\n"); 7456 7457 if (sc->sc_dma_ok) { 7458 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 7459 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7460 sc->sc_wdcdev.irqack = pciide_irqack; 7461 } 7462 sc->sc_wdcdev.PIO_cap = 4; 7463 sc->sc_wdcdev.DMA_cap = 2; 7464 sc->sc_wdcdev.UDMA_cap = 6; 7465 7466 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7467 sc->sc_wdcdev.nchannels = 4; 7468 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7469 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7470 sc->sc_wdcdev.set_modes = sata_setup_channel; 7471 7472 /* We can use SControl and SStatus to probe for drives. */ 7473 sc->sc_wdcdev.drv_probe = svwsata_drv_probe; 7474 7475 /* Map and establish the interrupt handler. */ 7476 if(pci_intr_map(pa, &intrhandle) != 0) { 7477 printf("%s: couldn't map native-PCI interrupt\n", 7478 sc->sc_wdcdev.sc_dev.dv_xname); 7479 return; 7480 } 7481 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 7482 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 7483 pciide_pci_intr, sc, sc->sc_wdcdev.sc_dev.dv_xname); 7484 if (sc->sc_pci_ih != NULL) { 7485 printf("%s: using %s for native-PCI interrupt\n", 7486 sc->sc_wdcdev.sc_dev.dv_xname, 7487 intrstr ? 
intrstr : "unknown interrupt"); 7488 } else { 7489 printf("%s: couldn't establish native-PCI interrupt", 7490 sc->sc_wdcdev.sc_dev.dv_xname); 7491 if (intrstr != NULL) 7492 printf(" at %s", intrstr); 7493 printf("\n"); 7494 return; 7495 } 7496 7497 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7498 cp = &sc->pciide_channels[channel]; 7499 if (pciide_chansetup(sc, channel, 0) == 0) 7500 continue; 7501 svwsata_mapchan(cp); 7502 sata_setup_channel(&cp->wdc_channel); 7503 } 7504 } 7505 7506 void 7507 svwsata_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 7508 { 7509 struct pciide_svwsata *ss = sc->sc_cookie; 7510 7511 sc->sc_wdcdev.dma_arg = sc; 7512 sc->sc_wdcdev.dma_init = pciide_dma_init; 7513 sc->sc_wdcdev.dma_start = pciide_dma_start; 7514 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 7515 7516 /* XXX */ 7517 sc->sc_dma_iot = ss->ba5_st; 7518 sc->sc_dma_ioh = ss->ba5_sh; 7519 7520 sc->sc_dmacmd_read = svwsata_dmacmd_read; 7521 sc->sc_dmacmd_write = svwsata_dmacmd_write; 7522 sc->sc_dmactl_read = svwsata_dmactl_read; 7523 sc->sc_dmactl_write = svwsata_dmactl_write; 7524 sc->sc_dmatbl_write = svwsata_dmatbl_write; 7525 7526 /* DMA registers all set up! */ 7527 sc->sc_dmat = pa->pa_dmat; 7528 sc->sc_dma_ok = 1; 7529 } 7530 7531 u_int8_t 7532 svwsata_dmacmd_read(struct pciide_softc *sc, int chan) 7533 { 7534 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7535 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0))); 7536 } 7537 7538 void 7539 svwsata_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 7540 { 7541 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7542 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0), val); 7543 } 7544 7545 u_int8_t 7546 svwsata_dmactl_read(struct pciide_softc *sc, int chan) 7547 { 7548 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7549 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0))); 7550 } 7551 7552 void 7553 svwsata_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 7554 { 7555 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7556 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0), val); 7557 } 7558 7559 void 7560 svwsata_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 7561 { 7562 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 7563 (chan << 8) + SVWSATA_DMA + IDEDMA_TBL(0), val); 7564 } 7565 7566 void 7567 svwsata_mapchan(struct pciide_channel *cp) 7568 { 7569 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7570 struct channel_softc *wdc_cp = &cp->wdc_channel; 7571 struct pciide_svwsata *ss = sc->sc_cookie; 7572 7573 cp->compat = 0; 7574 cp->ih = sc->sc_pci_ih; 7575 7576 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7577 (wdc_cp->channel << 8) + SVWSATA_TF0, 7578 SVWSATA_TF8 - SVWSATA_TF0, &wdc_cp->cmd_ioh) != 0) { 7579 printf("%s: couldn't map %s cmd regs\n", 7580 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7581 return; 7582 } 7583 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7584 (wdc_cp->channel << 8) + SVWSATA_TF8, 4, 7585 &wdc_cp->ctl_ioh) != 0) { 7586 printf("%s: couldn't map %s ctl regs\n", 7587 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7588 return; 7589 } 7590 wdc_cp->cmd_iot = wdc_cp->ctl_iot = ss->ba5_st; 7591 wdc_cp->_vtbl = &wdc_svwsata_vtbl; 7592 wdcattach(wdc_cp); 7593 } 7594 7595 void 7596 svwsata_drv_probe(struct channel_softc *chp) 7597 { 7598 struct pciide_channel *cp = (struct pciide_channel *)chp; 7599 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7600 struct pciide_svwsata *ss = sc->sc_cookie; 7601 int channel = chp->channel; 
7602 uint32_t scontrol, sstatus; 7603 uint8_t scnt, sn, cl, ch; 7604 int i, s; 7605 7606 /* XXX This should be done by other code. */ 7607 for (i = 0; i < 2; i++) { 7608 chp->ch_drive[i].chnl_softc = chp; 7609 chp->ch_drive[i].drive = i; 7610 } 7611 7612 /* 7613 * Request communication initialization sequence, any speed. 7614 * Performing this is the equivalent of an ATA Reset. 7615 */ 7616 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 7617 7618 /* 7619 * XXX We don't yet support SATA power management; disable all 7620 * power management state transitions. 7621 */ 7622 scontrol |= SControl_IPM_NONE; 7623 7624 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7625 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7626 delay(50 * 1000); 7627 scontrol &= ~SControl_DET_INIT; 7628 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7629 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7630 delay(50 * 1000); 7631 7632 sstatus = bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7633 (channel << 8) + SVWSATA_SSTATUS); 7634 #if 0 7635 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 7636 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 7637 bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7638 (channel << 8) + SVWSATA_SSTATUS)); 7639 #endif 7640 switch (sstatus & SStatus_DET_mask) { 7641 case SStatus_DET_NODEV: 7642 /* No device; be silent. */ 7643 break; 7644 7645 case SStatus_DET_DEV_NE: 7646 printf("%s: port %d: device connected, but " 7647 "communication not established\n", 7648 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7649 break; 7650 7651 case SStatus_DET_OFFLINE: 7652 printf("%s: port %d: PHY offline\n", 7653 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7654 break; 7655 7656 case SStatus_DET_DEV: 7657 /* 7658 * XXX ATAPI detection doesn't currently work. Don't 7659 * XXX know why. But, it's not like the standard method 7660 * XXX can detect an ATAPI device connected via a SATA/PATA 7661 * XXX bridge, so at least this is no worse. --thorpej 7662 */ 7663 if (chp->_vtbl != NULL) 7664 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 7665 else 7666 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 7667 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 7668 delay(10); /* 400ns delay */ 7669 /* Save register contents. */ 7670 if (chp->_vtbl != NULL) { 7671 scnt = CHP_READ_REG(chp, wdr_seccnt); 7672 sn = CHP_READ_REG(chp, wdr_sector); 7673 cl = CHP_READ_REG(chp, wdr_cyl_lo); 7674 ch = CHP_READ_REG(chp, wdr_cyl_hi); 7675 } else { 7676 scnt = bus_space_read_1(chp->cmd_iot, 7677 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 7678 sn = bus_space_read_1(chp->cmd_iot, 7679 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 7680 cl = bus_space_read_1(chp->cmd_iot, 7681 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 7682 ch = bus_space_read_1(chp->cmd_iot, 7683 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 7684 } 7685 #if 0 7686 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7687 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7688 scnt, sn, cl, ch); 7689 #endif 7690 /* 7691 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7692 * cases we get wrong values here, so ignore it. 
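 * Only the cylinder-low/high signature bytes (0x14/0xeb) are used
 * below to tell ATAPI from ATA.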
7693 */ 7694 s = splbio(); 7695 if (cl == 0x14 && ch == 0xeb) 7696 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7697 else 7698 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7699 splx(s); 7700 7701 printf("%s: port %d: device present", 7702 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7703 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7704 case 1: 7705 printf(", speed: 1.5Gb/s"); 7706 break; 7707 case 2: 7708 printf(", speed: 3.0Gb/s"); 7709 break; 7710 } 7711 printf("\n"); 7712 break; 7713 7714 default: 7715 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7716 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7717 } 7718 } 7719 7720 u_int8_t 7721 svwsata_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7722 { 7723 if (reg & _WDC_AUX) { 7724 return (bus_space_read_4(chp->ctl_iot, chp->ctl_ioh, 7725 (reg & _WDC_REGMASK) << 2)); 7726 } else { 7727 return (bus_space_read_4(chp->cmd_iot, chp->cmd_ioh, 7728 (reg & _WDC_REGMASK) << 2)); 7729 } 7730 } 7731 7732 void 7733 svwsata_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7734 { 7735 if (reg & _WDC_AUX) { 7736 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7737 (reg & _WDC_REGMASK) << 2, val); 7738 } else { 7739 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7740 (reg & _WDC_REGMASK) << 2, val); 7741 } 7742 } 7743 7744 void 7745 svwsata_lba48_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int16_t val) 7746 { 7747 if (reg & _WDC_AUX) { 7748 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7749 (reg & _WDC_REGMASK) << 2, val); 7750 } else { 7751 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7752 (reg & _WDC_REGMASK) << 2, val); 7753 } 7754 } 7755 7756 #define ACARD_IS_850(sc) \ 7757 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 7758 7759 void 7760 acard_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7761 { 7762 struct pciide_channel *cp; 7763 int i; 7764 pcireg_t interface; 7765 bus_size_t cmdsize, ctlsize; 7766 7767 /* 7768 * when the chip is in native mode it identifies itself as a 7769 * 'misc mass storage'. Fake interface in this case. 
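 * (i.e. if the PCI subclass is not IDE, assume bus master DMA with
 * both channels in native-PCI mode)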
7770 */ 7771 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 7772 interface = PCI_INTERFACE(pa->pa_class); 7773 } else { 7774 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 7775 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 7776 } 7777 7778 printf(": DMA"); 7779 pciide_mapreg_dma(sc, pa); 7780 printf("\n"); 7781 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7782 WDC_CAPABILITY_MODE; 7783 7784 if (sc->sc_dma_ok) { 7785 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7786 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7787 sc->sc_wdcdev.irqack = pciide_irqack; 7788 } 7789 sc->sc_wdcdev.PIO_cap = 4; 7790 sc->sc_wdcdev.DMA_cap = 2; 7791 switch (sc->sc_pp->ide_product) { 7792 case PCI_PRODUCT_ACARD_ATP850U: 7793 sc->sc_wdcdev.UDMA_cap = 2; 7794 break; 7795 case PCI_PRODUCT_ACARD_ATP860: 7796 case PCI_PRODUCT_ACARD_ATP860A: 7797 sc->sc_wdcdev.UDMA_cap = 4; 7798 break; 7799 case PCI_PRODUCT_ACARD_ATP865A: 7800 case PCI_PRODUCT_ACARD_ATP865R: 7801 sc->sc_wdcdev.UDMA_cap = 6; 7802 break; 7803 } 7804 7805 sc->sc_wdcdev.set_modes = acard_setup_channel; 7806 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7807 sc->sc_wdcdev.nchannels = 2; 7808 7809 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7810 cp = &sc->pciide_channels[i]; 7811 if (pciide_chansetup(sc, i, interface) == 0) 7812 continue; 7813 if (interface & PCIIDE_INTERFACE_PCI(i)) { 7814 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 7815 &ctlsize, pciide_pci_intr); 7816 } else { 7817 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 7818 &cmdsize, &ctlsize); 7819 } 7820 if (cp->hw_ok == 0) 7821 return; 7822 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 7823 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 7824 wdcattach(&cp->wdc_channel); 7825 acard_setup_channel(&cp->wdc_channel); 7826 } 7827 if (!ACARD_IS_850(sc)) { 7828 u_int32_t reg; 7829 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 7830 reg &= ~ATP860_CTRL_INT; 7831 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 7832 } 7833 } 7834 7835 void 7836 acard_setup_channel(struct channel_softc *chp) 7837 { 7838 struct ata_drive_datas *drvp; 7839 struct pciide_channel *cp = (struct pciide_channel *)chp; 7840 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7841 int channel = chp->channel; 7842 int drive; 7843 u_int32_t idetime, udma_mode; 7844 u_int32_t idedma_ctl; 7845 7846 /* setup DMA if needed */ 7847 pciide_channel_dma_setup(cp); 7848 7849 if (ACARD_IS_850(sc)) { 7850 idetime = 0; 7851 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 7852 udma_mode &= ~ATP850_UDMA_MASK(channel); 7853 } else { 7854 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 7855 idetime &= ~ATP860_SETTIME_MASK(channel); 7856 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 7857 udma_mode &= ~ATP860_UDMA_MASK(channel); 7858 } 7859 7860 idedma_ctl = 0; 7861 7862 /* Per drive settings */ 7863 for (drive = 0; drive < 2; drive++) { 7864 drvp = &chp->ch_drive[drive]; 7865 /* If no drive, skip */ 7866 if ((drvp->drive_flags & DRIVE) == 0) 7867 continue; 7868 /* add timing values, setup DMA if needed */ 7869 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7870 (drvp->drive_flags & DRIVE_UDMA)) { 7871 /* use Ultra/DMA */ 7872 if (ACARD_IS_850(sc)) { 7873 idetime |= ATP850_SETTIME(drive, 7874 acard_act_udma[drvp->UDMA_mode], 7875 acard_rec_udma[drvp->UDMA_mode]); 7876 udma_mode |= ATP850_UDMA_MODE(channel, drive, 7877 acard_udma_conf[drvp->UDMA_mode]); 7878 } else { 7879 idetime |= 
ATP860_SETTIME(channel, drive, 7880 acard_act_udma[drvp->UDMA_mode], 7881 acard_rec_udma[drvp->UDMA_mode]); 7882 udma_mode |= ATP860_UDMA_MODE(channel, drive, 7883 acard_udma_conf[drvp->UDMA_mode]); 7884 } 7885 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7886 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7887 (drvp->drive_flags & DRIVE_DMA)) { 7888 /* use Multiword DMA */ 7889 drvp->drive_flags &= ~DRIVE_UDMA; 7890 if (ACARD_IS_850(sc)) { 7891 idetime |= ATP850_SETTIME(drive, 7892 acard_act_dma[drvp->DMA_mode], 7893 acard_rec_dma[drvp->DMA_mode]); 7894 } else { 7895 idetime |= ATP860_SETTIME(channel, drive, 7896 acard_act_dma[drvp->DMA_mode], 7897 acard_rec_dma[drvp->DMA_mode]); 7898 } 7899 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7900 } else { 7901 /* PIO only */ 7902 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7903 if (ACARD_IS_850(sc)) { 7904 idetime |= ATP850_SETTIME(drive, 7905 acard_act_pio[drvp->PIO_mode], 7906 acard_rec_pio[drvp->PIO_mode]); 7907 } else { 7908 idetime |= ATP860_SETTIME(channel, drive, 7909 acard_act_pio[drvp->PIO_mode], 7910 acard_rec_pio[drvp->PIO_mode]); 7911 } 7912 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 7913 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 7914 | ATP8x0_CTRL_EN(channel)); 7915 } 7916 } 7917 7918 if (idedma_ctl != 0) { 7919 /* Add software bits in status register */ 7920 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7921 IDEDMA_CTL(channel), idedma_ctl); 7922 } 7923 pciide_print_modes(cp); 7924 7925 if (ACARD_IS_850(sc)) { 7926 pci_conf_write(sc->sc_pc, sc->sc_tag, 7927 ATP850_IDETIME(channel), idetime); 7928 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 7929 } else { 7930 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 7931 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 7932 } 7933 } 7934 7935 void 7936 nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7937 { 7938 struct pciide_channel *cp; 7939 int channel; 7940 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 7941 bus_size_t cmdsize, ctlsize; 7942 u_int32_t conf; 7943 7944 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 7945 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 7946 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 7947 7948 printf(": DMA"); 7949 pciide_mapreg_dma(sc, pa); 7950 7951 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7952 WDC_CAPABILITY_MODE; 7953 if (sc->sc_dma_ok) { 7954 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7955 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7956 sc->sc_wdcdev.irqack = pciide_irqack; 7957 } 7958 sc->sc_wdcdev.PIO_cap = 4; 7959 sc->sc_wdcdev.DMA_cap = 2; 7960 switch (sc->sc_pp->ide_product) { 7961 case PCI_PRODUCT_NVIDIA_NFORCE_IDE: 7962 sc->sc_wdcdev.UDMA_cap = 5; 7963 break; 7964 default: 7965 sc->sc_wdcdev.UDMA_cap = 6; 7966 } 7967 sc->sc_wdcdev.set_modes = nforce_setup_channel; 7968 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7969 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 7970 7971 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 7972 7973 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7974 cp = &sc->pciide_channels[channel]; 7975 7976 if (pciide_chansetup(sc, channel, interface) == 0) 7977 continue; 7978 7979 if ((conf & NFORCE_CHAN_EN(channel)) == 0) { 7980 printf("%s: %s ignored (disabled)\n", 7981 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7982 continue; 7983 } 7984 7985 pciide_map_compat_intr(pa, cp, channel, interface); 7986 if (cp->hw_ok == 0) 7987 continue; 7988 
pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7989 nforce_pci_intr); 7990 if (cp->hw_ok == 0) { 7991 pciide_unmap_compat_intr(pa, cp, channel, interface); 7992 continue; 7993 } 7994 7995 if (pciide_chan_candisable(cp)) { 7996 conf &= ~NFORCE_CHAN_EN(channel); 7997 pciide_unmap_compat_intr(pa, cp, channel, interface); 7998 continue; 7999 } 8000 8001 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8002 } 8003 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8004 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8005 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 8006 } 8007 8008 void 8009 nforce_setup_channel(struct channel_softc *chp) 8010 { 8011 struct ata_drive_datas *drvp; 8012 int drive, mode; 8013 u_int32_t idedma_ctl; 8014 struct pciide_channel *cp = (struct pciide_channel *)chp; 8015 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8016 int channel = chp->channel; 8017 u_int32_t conf, piodmatim, piotim, udmatim; 8018 8019 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8020 piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM); 8021 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM); 8022 udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM); 8023 WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, " 8024 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8025 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8026 8027 /* Setup DMA if needed */ 8028 pciide_channel_dma_setup(cp); 8029 8030 /* Clear all bits for this channel */ 8031 idedma_ctl = 0; 8032 piodmatim &= ~NFORCE_PIODMATIM_MASK(channel); 8033 udmatim &= ~NFORCE_UDMATIM_MASK(channel); 8034 8035 /* Per channel settings */ 8036 for (drive = 0; drive < 2; drive++) { 8037 drvp = &chp->ch_drive[drive]; 8038 8039 /* If no drive, skip */ 8040 if ((drvp->drive_flags & DRIVE) == 0) 8041 continue; 8042 8043 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8044 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8045 /* Setup UltraDMA mode */ 8046 drvp->drive_flags &= ~DRIVE_DMA; 8047 8048 udmatim |= NFORCE_UDMATIM_SET(channel, drive, 8049 nforce_udma[drvp->UDMA_mode]) | 8050 NFORCE_UDMA_EN(channel, drive) | 8051 NFORCE_UDMA_ENM(channel, drive); 8052 8053 mode = drvp->PIO_mode; 8054 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8055 (drvp->drive_flags & DRIVE_DMA) != 0) { 8056 /* Setup multiword DMA mode */ 8057 drvp->drive_flags &= ~DRIVE_UDMA; 8058 8059 /* mode = min(pio, dma + 2) */ 8060 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8061 mode = drvp->PIO_mode; 8062 else 8063 mode = drvp->DMA_mode + 2; 8064 } else { 8065 mode = drvp->PIO_mode; 8066 goto pio; 8067 } 8068 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8069 8070 pio: 8071 /* Setup PIO mode */ 8072 if (mode <= 2) { 8073 drvp->DMA_mode = 0; 8074 drvp->PIO_mode = 0; 8075 mode = 0; 8076 } else { 8077 drvp->PIO_mode = mode; 8078 drvp->DMA_mode = mode - 2; 8079 } 8080 piodmatim |= NFORCE_PIODMATIM_SET(channel, drive, 8081 nforce_pio[mode]); 8082 } 8083 8084 if (idedma_ctl != 0) { 8085 /* Add software bits in status register */ 8086 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8087 IDEDMA_CTL(channel), idedma_ctl); 8088 } 8089 8090 WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, " 8091 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8092 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8093 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim); 8094 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim); 8095 8096 
pciide_print_modes(cp); 8097 } 8098 8099 int 8100 nforce_pci_intr(void *arg) 8101 { 8102 struct pciide_softc *sc = arg; 8103 struct pciide_channel *cp; 8104 struct channel_softc *wdc_cp; 8105 int i, rv, crv; 8106 u_int32_t dmastat; 8107 8108 rv = 0; 8109 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 8110 cp = &sc->pciide_channels[i]; 8111 wdc_cp = &cp->wdc_channel; 8112 8113 /* Skip compat channel */ 8114 if (cp->compat) 8115 continue; 8116 8117 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8118 IDEDMA_CTL(i)); 8119 if ((dmastat & IDEDMA_CTL_INTR) == 0) 8120 continue; 8121 8122 crv = wdcintr(wdc_cp); 8123 if (crv == 0) 8124 printf("%s:%d: bogus intr\n", 8125 sc->sc_wdcdev.sc_dev.dv_xname, i); 8126 else 8127 rv = 1; 8128 } 8129 return (rv); 8130 } 8131 8132 void 8133 artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8134 { 8135 struct pciide_channel *cp; 8136 bus_size_t cmdsize, ctlsize; 8137 pcireg_t interface; 8138 int channel; 8139 8140 printf(": DMA"); 8141 #ifdef PCIIDE_I31244_DISABLEDMA 8142 if (sc->sc_rev == 0) { 8143 printf(" disabled due to rev. 0"); 8144 sc->sc_dma_ok = 0; 8145 } else 8146 #endif 8147 pciide_mapreg_dma(sc, pa); 8148 printf("\n"); 8149 8150 /* 8151 * XXX Configure LEDs to show activity. 8152 */ 8153 8154 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8155 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 8156 sc->sc_wdcdev.PIO_cap = 4; 8157 if (sc->sc_dma_ok) { 8158 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8159 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8160 sc->sc_wdcdev.irqack = pciide_irqack; 8161 sc->sc_wdcdev.DMA_cap = 2; 8162 sc->sc_wdcdev.UDMA_cap = 6; 8163 } 8164 sc->sc_wdcdev.set_modes = sata_setup_channel; 8165 8166 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8167 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8168 8169 interface = PCI_INTERFACE(pa->pa_class); 8170 8171 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8172 cp = &sc->pciide_channels[channel]; 8173 if (pciide_chansetup(sc, channel, interface) == 0) 8174 continue; 8175 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8176 pciide_pci_intr); 8177 if (cp->hw_ok == 0) 8178 continue; 8179 pciide_map_compat_intr(pa, cp, channel, interface); 8180 sata_setup_channel(&cp->wdc_channel); 8181 } 8182 } 8183 8184 void 8185 ite_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8186 { 8187 struct pciide_channel *cp; 8188 int channel; 8189 pcireg_t interface; 8190 bus_size_t cmdsize, ctlsize; 8191 pcireg_t cfg, modectl; 8192 8193 /* 8194 * Fake interface since IT8212F is claimed to be a ``RAID'' device. 
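 * The interface byte of the class code is therefore ignored and
 * bus master DMA with both channels in native-PCI mode is assumed.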
8195 */ 8196 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8197 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8198 8199 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8200 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8201 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8202 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8203 modectl & IT_MODE_MASK), DEBUG_PROBE); 8204 8205 printf(": DMA"); 8206 pciide_mapreg_dma(sc, pa); 8207 8208 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8209 WDC_CAPABILITY_MODE; 8210 if (sc->sc_dma_ok) { 8211 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8212 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8213 sc->sc_wdcdev.irqack = pciide_irqack; 8214 } 8215 sc->sc_wdcdev.PIO_cap = 4; 8216 sc->sc_wdcdev.DMA_cap = 2; 8217 sc->sc_wdcdev.UDMA_cap = 6; 8218 8219 sc->sc_wdcdev.set_modes = ite_setup_channel; 8220 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8221 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8222 8223 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8224 8225 /* Disable RAID */ 8226 modectl &= ~IT_MODE_RAID1; 8227 /* Disable CPU firmware mode */ 8228 modectl &= ~IT_MODE_CPU; 8229 8230 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8231 8232 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8233 cp = &sc->pciide_channels[channel]; 8234 8235 if (pciide_chansetup(sc, channel, interface) == 0) 8236 continue; 8237 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8238 pciide_pci_intr); 8239 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8240 } 8241 8242 /* Re-read configuration registers after channels setup */ 8243 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8244 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8245 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8246 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8247 modectl & IT_MODE_MASK), DEBUG_PROBE); 8248 } 8249 8250 void 8251 ite_setup_channel(struct channel_softc *chp) 8252 { 8253 struct ata_drive_datas *drvp; 8254 int drive, mode; 8255 u_int32_t idedma_ctl; 8256 struct pciide_channel *cp = (struct pciide_channel *)chp; 8257 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8258 int channel = chp->channel; 8259 pcireg_t cfg, modectl; 8260 pcireg_t tim; 8261 8262 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8263 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8264 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_TIM(channel)); 8265 WDCDEBUG_PRINT(("%s:%d: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8266 channel, tim), DEBUG_PROBE); 8267 8268 /* Setup DMA if needed */ 8269 pciide_channel_dma_setup(cp); 8270 8271 /* Clear all bits for this channel */ 8272 idedma_ctl = 0; 8273 8274 /* Per channel settings */ 8275 for (drive = 0; drive < 2; drive++) { 8276 drvp = &chp->ch_drive[drive]; 8277 8278 /* If no drive, skip */ 8279 if ((drvp->drive_flags & DRIVE) == 0) 8280 continue; 8281 8282 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8283 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8284 /* Setup UltraDMA mode */ 8285 drvp->drive_flags &= ~DRIVE_DMA; 8286 modectl &= ~IT_MODE_DMA(channel, drive); 8287 8288 #if 0 8289 /* Check cable, works only in CPU firmware mode */ 8290 if (drvp->UDMA_mode > 2 && 8291 (cfg & IT_CFG_CABLE(channel, drive)) == 0) { 8292 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 8293 "80-wire cable not detected\n", 8294 drvp->drive_name, 8295 sc->sc_wdcdev.sc_dev.dv_xname, 8296 channel, drive), DEBUG_PROBE); 8297 drvp->UDMA_mode = 2; 8298 } 8299 #endif 8300 
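/*
 * With the cable check above compiled out, Ultra/DMA modes above 2
 * are kept even when an 80-wire cable cannot be verified.
 */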
8301 if (drvp->UDMA_mode >= 5) 8302 tim |= IT_TIM_UDMA5(drive); 8303 else 8304 tim &= ~IT_TIM_UDMA5(drive); 8305 8306 mode = drvp->PIO_mode; 8307 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8308 (drvp->drive_flags & DRIVE_DMA) != 0) { 8309 /* Setup multiword DMA mode */ 8310 drvp->drive_flags &= ~DRIVE_UDMA; 8311 modectl |= IT_MODE_DMA(channel, drive); 8312 8313 /* mode = min(pio, dma + 2) */ 8314 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8315 mode = drvp->PIO_mode; 8316 else 8317 mode = drvp->DMA_mode + 2; 8318 } else { 8319 goto pio; 8320 } 8321 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8322 8323 pio: 8324 /* Setup PIO mode */ 8325 if (mode <= 2) { 8326 drvp->DMA_mode = 0; 8327 drvp->PIO_mode = 0; 8328 mode = 0; 8329 } else { 8330 drvp->PIO_mode = mode; 8331 drvp->DMA_mode = mode - 2; 8332 } 8333 8334 /* Enable IORDY if PIO mode >= 3 */ 8335 if (drvp->PIO_mode >= 3) 8336 cfg |= IT_CFG_IORDY(channel); 8337 } 8338 8339 WDCDEBUG_PRINT(("%s: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8340 tim), DEBUG_PROBE); 8341 8342 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_CFG, cfg); 8343 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8344 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_TIM(channel), tim); 8345 8346 if (idedma_ctl != 0) { 8347 /* Add software bits in status register */ 8348 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8349 IDEDMA_CTL(channel), idedma_ctl); 8350 } 8351 8352 pciide_print_modes(cp); 8353 } 8354 8355 void 8356 ixp_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8357 { 8358 struct pciide_channel *cp; 8359 int channel; 8360 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8361 bus_size_t cmdsize, ctlsize; 8362 8363 printf(": DMA"); 8364 pciide_mapreg_dma(sc, pa); 8365 8366 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8367 WDC_CAPABILITY_MODE; 8368 if (sc->sc_dma_ok) { 8369 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8370 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8371 sc->sc_wdcdev.irqack = pciide_irqack; 8372 } 8373 sc->sc_wdcdev.PIO_cap = 4; 8374 sc->sc_wdcdev.DMA_cap = 2; 8375 sc->sc_wdcdev.UDMA_cap = 6; 8376 8377 sc->sc_wdcdev.set_modes = ixp_setup_channel; 8378 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8379 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8380 8381 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8382 8383 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8384 cp = &sc->pciide_channels[channel]; 8385 if (pciide_chansetup(sc, channel, interface) == 0) 8386 continue; 8387 pciide_map_compat_intr(pa, cp, channel, interface); 8388 if (cp->hw_ok == 0) 8389 continue; 8390 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8391 pciide_pci_intr); 8392 if (cp->hw_ok == 0) { 8393 pciide_unmap_compat_intr(pa, cp, channel, interface); 8394 continue; 8395 } 8396 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8397 } 8398 } 8399 8400 void 8401 ixp_setup_channel(struct channel_softc *chp) 8402 { 8403 struct ata_drive_datas *drvp; 8404 int drive, mode; 8405 u_int32_t idedma_ctl; 8406 struct pciide_channel *cp = (struct pciide_channel*)chp; 8407 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8408 int channel = chp->channel; 8409 pcireg_t udma, mdma_timing, pio, pio_timing; 8410 8411 pio_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING); 8412 pio = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL); 8413 mdma_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING); 8414 udma = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL); 8415 
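/*
 * The four registers read above are updated per drive below and
 * written back together after the loop.
 */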
8416 /* Setup DMA if needed */ 8417 pciide_channel_dma_setup(cp); 8418 8419 idedma_ctl = 0; 8420 8421 /* Per channel settings */ 8422 for (drive = 0; drive < 2; drive++) { 8423 drvp = &chp->ch_drive[drive]; 8424 8425 /* If no drive, skip */ 8426 if ((drvp->drive_flags & DRIVE) == 0) 8427 continue; 8428 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8429 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8430 /* Setup UltraDMA mode */ 8431 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8432 IXP_UDMA_ENABLE(udma, chp->channel, drive); 8433 IXP_SET_MODE(udma, chp->channel, drive, 8434 drvp->UDMA_mode); 8435 mode = drvp->PIO_mode; 8436 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8437 (drvp->drive_flags & DRIVE_DMA) != 0) { 8438 /* Setup multiword DMA mode */ 8439 drvp->drive_flags &= ~DRIVE_UDMA; 8440 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8441 IXP_UDMA_DISABLE(udma, chp->channel, drive); 8442 IXP_SET_TIMING(mdma_timing, chp->channel, drive, 8443 ixp_mdma_timings[drvp->DMA_mode]); 8444 8445 /* mode = min(pio, dma + 2) */ 8446 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8447 mode = drvp->PIO_mode; 8448 else 8449 mode = drvp->DMA_mode + 2; 8450 } else { 8451 mode = drvp->PIO_mode; 8452 } 8453 8454 /* Setup PIO mode */ 8455 drvp->PIO_mode = mode; 8456 if (mode < 2) 8457 drvp->DMA_mode = 0; 8458 else 8459 drvp->DMA_mode = mode - 2; 8460 /* 8461 * Set PIO mode and timings 8462 * Linux driver avoids PIO mode 1, let's do it too. 8463 */ 8464 if (drvp->PIO_mode == 1) 8465 drvp->PIO_mode = 0; 8466 8467 IXP_SET_MODE(pio, chp->channel, drive, drvp->PIO_mode); 8468 IXP_SET_TIMING(pio_timing, chp->channel, drive, 8469 ixp_pio_timings[drvp->PIO_mode]); 8470 } 8471 8472 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL, udma); 8473 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING, mdma_timing); 8474 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL, pio); 8475 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING, pio_timing); 8476 8477 if (idedma_ctl != 0) { 8478 /* Add software bits in status register */ 8479 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8480 IDEDMA_CTL(channel), idedma_ctl); 8481 } 8482 8483 pciide_print_modes(cp); 8484 } 8485 8486 void 8487 jmicron_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8488 { 8489 struct pciide_channel *cp; 8490 int channel; 8491 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8492 bus_size_t cmdsize, ctlsize; 8493 u_int32_t conf; 8494 8495 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8496 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8497 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8498 8499 printf(": DMA"); 8500 pciide_mapreg_dma(sc, pa); 8501 8502 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8503 WDC_CAPABILITY_MODE; 8504 if (sc->sc_dma_ok) { 8505 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8506 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8507 sc->sc_wdcdev.irqack = pciide_irqack; 8508 } 8509 sc->sc_wdcdev.PIO_cap = 4; 8510 sc->sc_wdcdev.DMA_cap = 2; 8511 sc->sc_wdcdev.UDMA_cap = 6; 8512 sc->sc_wdcdev.set_modes = jmicron_setup_channel; 8513 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8514 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8515 8516 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8517 8518 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8519 cp = &sc->pciide_channels[channel]; 8520 8521 if (pciide_chansetup(sc, channel, interface) == 0) 8522 continue; 8523 8524 #if 0 8525 if ((conf & JMICRON_CHAN_EN(channel)) == 0) { 8526 
printf("%s: %s ignored (disabled)\n", 8527 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8528 continue; 8529 } 8530 #endif 8531 8532 pciide_map_compat_intr(pa, cp, channel, interface); 8533 if (cp->hw_ok == 0) 8534 continue; 8535 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8536 pciide_pci_intr); 8537 if (cp->hw_ok == 0) { 8538 pciide_unmap_compat_intr(pa, cp, channel, interface); 8539 continue; 8540 } 8541 8542 if (pciide_chan_candisable(cp)) { 8543 conf &= ~JMICRON_CHAN_EN(channel); 8544 pciide_unmap_compat_intr(pa, cp, channel, interface); 8545 continue; 8546 } 8547 8548 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8549 } 8550 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8551 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8552 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 8553 } 8554 8555 void 8556 jmicron_setup_channel(struct channel_softc *chp) 8557 { 8558 struct ata_drive_datas *drvp; 8559 int drive, mode; 8560 u_int32_t idedma_ctl; 8561 struct pciide_channel *cp = (struct pciide_channel *)chp; 8562 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8563 int channel = chp->channel; 8564 u_int32_t conf; 8565 8566 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8567 8568 /* Setup DMA if needed */ 8569 pciide_channel_dma_setup(cp); 8570 8571 /* Clear all bits for this channel */ 8572 idedma_ctl = 0; 8573 8574 /* Per channel settings */ 8575 for (drive = 0; drive < 2; drive++) { 8576 drvp = &chp->ch_drive[drive]; 8577 8578 /* If no drive, skip */ 8579 if ((drvp->drive_flags & DRIVE) == 0) 8580 continue; 8581 8582 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8583 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8584 /* Setup UltraDMA mode */ 8585 drvp->drive_flags &= ~DRIVE_DMA; 8586 8587 /* see if cable is up to scratch */ 8588 if ((conf & JMICRON_CONF_40PIN) && 8589 (drvp->UDMA_mode > 2)) 8590 drvp->UDMA_mode = 2; 8591 8592 mode = drvp->PIO_mode; 8593 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8594 (drvp->drive_flags & DRIVE_DMA) != 0) { 8595 /* Setup multiword DMA mode */ 8596 drvp->drive_flags &= ~DRIVE_UDMA; 8597 8598 /* mode = min(pio, dma + 2) */ 8599 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8600 mode = drvp->PIO_mode; 8601 else 8602 mode = drvp->DMA_mode + 2; 8603 } else { 8604 mode = drvp->PIO_mode; 8605 goto pio; 8606 } 8607 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8608 8609 pio: 8610 /* Setup PIO mode */ 8611 if (mode <= 2) { 8612 drvp->DMA_mode = 0; 8613 drvp->PIO_mode = 0; 8614 } else { 8615 drvp->PIO_mode = mode; 8616 drvp->DMA_mode = mode - 2; 8617 } 8618 } 8619 8620 if (idedma_ctl != 0) { 8621 /* Add software bits in status register */ 8622 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8623 IDEDMA_CTL(channel), idedma_ctl); 8624 } 8625 8626 pciide_print_modes(cp); 8627 } 8628 8629 void 8630 phison_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8631 { 8632 struct pciide_channel *cp; 8633 int channel; 8634 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8635 bus_size_t cmdsize, ctlsize; 8636 u_int32_t conf; 8637 8638 sc->chip_unmap = default_chip_unmap; 8639 8640 printf(": DMA"); 8641 pciide_mapreg_dma(sc, pa); 8642 8643 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8644 WDC_CAPABILITY_MODE; 8645 if (sc->sc_dma_ok) { 8646 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8647 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8648 sc->sc_wdcdev.irqack = pciide_irqack; 8649 } 8650 sc->sc_wdcdev.PIO_cap = 4; 8651 sc->sc_wdcdev.DMA_cap = 2; 8652 
8653 sc->sc_wdcdev.set_modes = phison_setup_channel;
8654 sc->sc_wdcdev.channels = sc->wdc_chanarray;
8655 sc->sc_wdcdev.nchannels = 1;
8656
8657 pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
8658
8659 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
8660 cp = &sc->pciide_channels[channel];
8661
8662 if (pciide_chansetup(sc, channel, interface) == 0)
8663 continue;
8664
8665 pciide_map_compat_intr(pa, cp, channel, interface);
8666 if (cp->hw_ok == 0)
8667 continue;
8668 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
8669 pciide_pci_intr);
8670 if (cp->hw_ok == 0) {
8671 pciide_unmap_compat_intr(pa, cp, channel, interface);
8672 continue;
8673 }
8674
8675 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
8676 }
8680 }
8681
8682 void
8683 phison_setup_channel(struct channel_softc *chp)
8684 {
8685 struct ata_drive_datas *drvp;
8686 int drive, mode;
8687 u_int32_t idedma_ctl;
8688 struct pciide_channel *cp = (struct pciide_channel *)chp;
8689 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
8690 int channel = chp->channel;
8691
8692 /* Setup DMA if needed */
8693 pciide_channel_dma_setup(cp);
8694
8695 /* Clear all bits for this channel */
8696 idedma_ctl = 0;
8697
8698 /* Per channel settings */
8699 for (drive = 0; drive < 2; drive++) {
8700 drvp = &chp->ch_drive[drive];
8701
8702 /* If no drive, skip */
8703 if ((drvp->drive_flags & DRIVE) == 0)
8704 continue;
8705
8706 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 &&
8707 (drvp->drive_flags & DRIVE_UDMA) != 0) {
8708 /* Setup UltraDMA mode */
8709 drvp->drive_flags &= ~DRIVE_DMA;
8710 mode = drvp->PIO_mode;
8711 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 &&
8712 (drvp->drive_flags & DRIVE_DMA) != 0) {
8713 /* Setup multiword DMA mode */
8714 drvp->drive_flags &= ~DRIVE_UDMA;
8715
8716 /* mode = min(pio, dma + 2) */
8717 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
8718 mode = drvp->PIO_mode;
8719 else
8720 mode = drvp->DMA_mode + 2;
8721 } else {
8722 mode = drvp->PIO_mode;
8723 goto pio;
8724 }
8725 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
8726
8727 pio:
8728 /* Setup PIO mode */
8729 if (mode <= 2) {
8730 drvp->DMA_mode = 0;
8731 drvp->PIO_mode = 0;
8732 } else {
8733 drvp->PIO_mode = mode;
8734 drvp->DMA_mode = mode - 2;
8735 }
8736 }
8737
8738 if (idedma_ctl != 0) {
8739 /* Add software bits in status register */
8740 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
8741 IDEDMA_CTL(channel), idedma_ctl);
8742 }
8743
8744 pciide_print_modes(cp);
8745 }
8746
8747 void
8748 sch_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
8749 {
8750 struct pciide_channel *cp;
8751 int channel;
8752 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
8753 bus_size_t cmdsize, ctlsize;
8754
8755 printf(": DMA");
8756 pciide_mapreg_dma(sc, pa);
8757
8758 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
8759 WDC_CAPABILITY_MODE;
8760 if (sc->sc_dma_ok) {
8761 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
8762 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
8763 sc->sc_wdcdev.irqack = pciide_irqack;
8764 }
8765 sc->sc_wdcdev.PIO_cap = 4;
8766 sc->sc_wdcdev.DMA_cap = 2;
8767 sc->sc_wdcdev.UDMA_cap = 5;
8768 sc->sc_wdcdev.set_modes = sch_setup_channel;
8769 sc->sc_wdcdev.channels = sc->wdc_chanarray;
8770 sc->sc_wdcdev.nchannels = 1;
8771
8772
pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8773 8774 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8775 cp = &sc->pciide_channels[channel]; 8776 8777 if (pciide_chansetup(sc, channel, interface) == 0) 8778 continue; 8779 8780 pciide_map_compat_intr(pa, cp, channel, interface); 8781 if (cp->hw_ok == 0) 8782 continue; 8783 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8784 pciide_pci_intr); 8785 if (cp->hw_ok == 0) { 8786 pciide_unmap_compat_intr(pa, cp, channel, interface); 8787 continue; 8788 } 8789 8790 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8791 } 8792 } 8793 8794 void 8795 sch_setup_channel(struct channel_softc *chp) 8796 { 8797 struct ata_drive_datas *drvp; 8798 int drive, mode; 8799 u_int32_t tim, timaddr; 8800 struct pciide_channel *cp = (struct pciide_channel *)chp; 8801 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8802 8803 /* Setup DMA if needed */ 8804 pciide_channel_dma_setup(cp); 8805 8806 /* Per channel settings */ 8807 for (drive = 0; drive < 2; drive++) { 8808 drvp = &chp->ch_drive[drive]; 8809 8810 /* If no drive, skip */ 8811 if ((drvp->drive_flags & DRIVE) == 0) 8812 continue; 8813 8814 timaddr = (drive == 0) ? SCH_D0TIM : SCH_D1TIM; 8815 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, timaddr); 8816 tim &= ~SCH_TIM_MASK; 8817 8818 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8819 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8820 /* Setup UltraDMA mode */ 8821 drvp->drive_flags &= ~DRIVE_DMA; 8822 8823 mode = drvp->PIO_mode; 8824 tim |= (drvp->UDMA_mode << 16) | SCH_TIM_SYNCDMA; 8825 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8826 (drvp->drive_flags & DRIVE_DMA) != 0) { 8827 /* Setup multiword DMA mode */ 8828 drvp->drive_flags &= ~DRIVE_UDMA; 8829 8830 tim &= ~SCH_TIM_SYNCDMA; 8831 8832 /* mode = min(pio, dma + 2) */ 8833 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8834 mode = drvp->PIO_mode; 8835 else 8836 mode = drvp->DMA_mode + 2; 8837 } else { 8838 mode = drvp->PIO_mode; 8839 goto pio; 8840 } 8841 8842 pio: 8843 /* Setup PIO mode */ 8844 if (mode <= 2) { 8845 drvp->DMA_mode = 0; 8846 drvp->PIO_mode = 0; 8847 } else { 8848 drvp->PIO_mode = mode; 8849 drvp->DMA_mode = mode - 2; 8850 } 8851 tim |= (drvp->DMA_mode << 8) | (drvp->PIO_mode); 8852 pci_conf_write(sc->sc_pc, sc->sc_tag, timaddr, tim); 8853 } 8854 8855 pciide_print_modes(cp); 8856 } 8857
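/*
 * Note: as programmed by sch_setup_channel() above, each per-drive timing
 * register (SCH_D0TIM/SCH_D1TIM) ends up with the PIO mode in its low bits,
 * the multiword DMA mode at bit 8, the Ultra/DMA mode at bit 16, and
 * SCH_TIM_SYNCDMA set when Ultra/DMA is in use (and cleared for multiword
 * DMA).
 */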