1 /* $OpenBSD: pciide.c,v 1.301 2009/10/05 20:39:26 deraadt Exp $ */ 2 /* $NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $ */ 3 4 /* 5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Manuel Bouyer. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 */ 34 35 /* 36 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. All advertising materials mentioning features or use of this software 47 * must display the following acknowledgement: 48 * This product includes software developed by Christopher G. Demetriou 49 * for the NetBSD Project. 50 * 4. The name of the author may not be used to endorse or promote products 51 * derived from this software without specific prior written permission 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
56 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 57 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 58 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 62 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 63 */ 64 65 /* 66 * PCI IDE controller driver. 67 * 68 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 69 * sys/dev/pci/ppb.c, revision 1.16). 70 * 71 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 72 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 73 * 5/16/94" from the PCI SIG. 74 * 75 */ 76 77 #define DEBUG_DMA 0x01 78 #define DEBUG_XFERS 0x02 79 #define DEBUG_FUNCS 0x08 80 #define DEBUG_PROBE 0x10 81 82 #ifdef WDCDEBUG 83 #ifndef WDCDEBUG_PCIIDE_MASK 84 #define WDCDEBUG_PCIIDE_MASK 0x00 85 #endif 86 int wdcdebug_pciide_mask = WDCDEBUG_PCIIDE_MASK; 87 #define WDCDEBUG_PRINT(args, level) do { \ 88 if ((wdcdebug_pciide_mask & (level)) != 0) \ 89 printf args; \ 90 } while (0) 91 #else 92 #define WDCDEBUG_PRINT(args, level) 93 #endif 94 #include <sys/param.h> 95 #include <sys/systm.h> 96 #include <sys/device.h> 97 #include <sys/malloc.h> 98 99 #include <machine/bus.h> 100 #include <machine/endian.h> 101 102 #include <dev/ata/atavar.h> 103 #include <dev/ata/satareg.h> 104 #include <dev/ic/wdcreg.h> 105 #include <dev/ic/wdcvar.h> 106 107 #include <dev/pci/pcireg.h> 108 #include <dev/pci/pcivar.h> 109 #include <dev/pci/pcidevs.h> 110 111 #include <dev/pci/pciidereg.h> 112 #include <dev/pci/pciidevar.h> 113 #include <dev/pci/pciide_piix_reg.h> 114 #include <dev/pci/pciide_amd_reg.h> 115 #include <dev/pci/pciide_apollo_reg.h> 116 #include <dev/pci/pciide_cmd_reg.h> 117 #include <dev/pci/pciide_sii3112_reg.h> 118 #include <dev/pci/pciide_cy693_reg.h> 119 #include <dev/pci/pciide_sis_reg.h> 120 #include <dev/pci/pciide_acer_reg.h> 121 #include <dev/pci/pciide_pdc202xx_reg.h> 122 #include <dev/pci/pciide_opti_reg.h> 123 #include <dev/pci/pciide_hpt_reg.h> 124 #include <dev/pci/pciide_acard_reg.h> 125 #include <dev/pci/pciide_natsemi_reg.h> 126 #include <dev/pci/pciide_nforce_reg.h> 127 #include <dev/pci/pciide_i31244_reg.h> 128 #include <dev/pci/pciide_ite_reg.h> 129 #include <dev/pci/pciide_ixp_reg.h> 130 #include <dev/pci/pciide_svwsata_reg.h> 131 #include <dev/pci/pciide_jmicron_reg.h> 132 #include <dev/pci/cy82c693var.h> 133 134 /* functions for reading/writing 8-bit PCI registers */ 135 136 u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t, 137 int); 138 void pciide_pci_write(pci_chipset_tag_t, pcitag_t, 139 int, u_int8_t); 140 141 u_int8_t 142 pciide_pci_read(pci_chipset_tag_t pc, pcitag_t pa, int reg) 143 { 144 return (pci_conf_read(pc, pa, (reg & ~0x03)) >> 145 ((reg & 0x03) * 8) & 0xff); 146 } 147 148 void 149 pciide_pci_write(pci_chipset_tag_t pc, pcitag_t pa, int reg, u_int8_t val) 150 { 151 pcireg_t pcival; 152 153 pcival = pci_conf_read(pc, pa, (reg & ~0x03)); 154 pcival &= ~(0xff << ((reg & 0x03) * 8)); 155 pcival |= (val << ((reg & 0x03) * 8)); 156 pci_conf_write(pc, pa, (reg & ~0x03), pcival); 157 } 158 159 void default_chip_map(struct pciide_softc *, struct pci_attach_args *); 160 161 void sata_chip_map(struct pciide_softc *, struct pci_attach_args *); 162 void sata_setup_channel(struct channel_softc 
*); 163 164 void piix_chip_map(struct pciide_softc *, struct pci_attach_args *); 165 void piixsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 166 void piix_setup_channel(struct channel_softc *); 167 void piix3_4_setup_channel(struct channel_softc *); 168 void piix_timing_debug(struct pciide_softc *); 169 170 u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t); 171 u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas *); 172 u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t); 173 174 void amd756_chip_map(struct pciide_softc *, struct pci_attach_args *); 175 void amd756_setup_channel(struct channel_softc *); 176 177 void apollo_chip_map(struct pciide_softc *, struct pci_attach_args *); 178 void apollo_setup_channel(struct channel_softc *); 179 180 void cmd_chip_map(struct pciide_softc *, struct pci_attach_args *); 181 void cmd0643_9_chip_map(struct pciide_softc *, struct pci_attach_args *); 182 void cmd0643_9_setup_channel(struct channel_softc *); 183 void cmd680_chip_map(struct pciide_softc *, struct pci_attach_args *); 184 void cmd680_setup_channel(struct channel_softc *); 185 void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int); 186 void cmd_channel_map(struct pci_attach_args *, 187 struct pciide_softc *, int); 188 int cmd_pci_intr(void *); 189 void cmd646_9_irqack(struct channel_softc *); 190 191 void sii_fixup_cacheline(struct pciide_softc *, struct pci_attach_args *); 192 void sii3112_chip_map(struct pciide_softc *, struct pci_attach_args *); 193 void sii3112_setup_channel(struct channel_softc *); 194 void sii3112_drv_probe(struct channel_softc *); 195 void sii3114_chip_map(struct pciide_softc *, struct pci_attach_args *); 196 void sii3114_mapreg_dma(struct pciide_softc *, struct pci_attach_args *); 197 int sii3114_chansetup(struct pciide_softc *, int); 198 void sii3114_mapchan(struct pciide_channel *); 199 u_int8_t sii3114_dmacmd_read(struct pciide_softc *, int); 200 void sii3114_dmacmd_write(struct pciide_softc *, int, u_int8_t); 201 u_int8_t sii3114_dmactl_read(struct pciide_softc *, int); 202 void sii3114_dmactl_write(struct pciide_softc *, int, u_int8_t); 203 void sii3114_dmatbl_write(struct pciide_softc *, int, u_int32_t); 204 205 void cy693_chip_map(struct pciide_softc *, struct pci_attach_args *); 206 void cy693_setup_channel(struct channel_softc *); 207 208 void sis_chip_map(struct pciide_softc *, struct pci_attach_args *); 209 void sis_setup_channel(struct channel_softc *); 210 void sis96x_setup_channel(struct channel_softc *); 211 int sis_hostbr_match(struct pci_attach_args *); 212 int sis_south_match(struct pci_attach_args *); 213 214 void natsemi_chip_map(struct pciide_softc *, struct pci_attach_args *); 215 void natsemi_setup_channel(struct channel_softc *); 216 int natsemi_pci_intr(void *); 217 void natsemi_irqack(struct channel_softc *); 218 void ns_scx200_chip_map(struct pciide_softc *, struct pci_attach_args *); 219 void ns_scx200_setup_channel(struct channel_softc *); 220 221 void acer_chip_map(struct pciide_softc *, struct pci_attach_args *); 222 void acer_setup_channel(struct channel_softc *); 223 int acer_pci_intr(void *); 224 225 void pdc202xx_chip_map(struct pciide_softc *, struct pci_attach_args *); 226 void pdc202xx_setup_channel(struct channel_softc *); 227 void pdc20268_setup_channel(struct channel_softc *); 228 int pdc202xx_pci_intr(void *); 229 int pdc20265_pci_intr(void *); 230 void pdc20262_dma_start(void *, int, int); 231 int pdc20262_dma_finish(void *, int, int, int); 232 233 
u_int8_t pdc268_config_read(struct channel_softc *, int); 234 235 void pdcsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 236 void pdc203xx_setup_channel(struct channel_softc *); 237 int pdc203xx_pci_intr(void *); 238 void pdc203xx_irqack(struct channel_softc *); 239 void pdc203xx_dma_start(void *,int ,int); 240 int pdc203xx_dma_finish(void *, int, int, int); 241 int pdc205xx_pci_intr(void *); 242 void pdc205xx_do_reset(struct channel_softc *); 243 void pdc205xx_drv_probe(struct channel_softc *); 244 245 void opti_chip_map(struct pciide_softc *, struct pci_attach_args *); 246 void opti_setup_channel(struct channel_softc *); 247 248 void hpt_chip_map(struct pciide_softc *, struct pci_attach_args *); 249 void hpt_setup_channel(struct channel_softc *); 250 int hpt_pci_intr(void *); 251 252 void acard_chip_map(struct pciide_softc *, struct pci_attach_args *); 253 void acard_setup_channel(struct channel_softc *); 254 255 void serverworks_chip_map(struct pciide_softc *, struct pci_attach_args *); 256 void serverworks_setup_channel(struct channel_softc *); 257 int serverworks_pci_intr(void *); 258 259 void svwsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 260 void svwsata_mapreg_dma(struct pciide_softc *, struct pci_attach_args *); 261 void svwsata_mapchan(struct pciide_channel *); 262 u_int8_t svwsata_dmacmd_read(struct pciide_softc *, int); 263 void svwsata_dmacmd_write(struct pciide_softc *, int, u_int8_t); 264 u_int8_t svwsata_dmactl_read(struct pciide_softc *, int); 265 void svwsata_dmactl_write(struct pciide_softc *, int, u_int8_t); 266 void svwsata_dmatbl_write(struct pciide_softc *, int, u_int32_t); 267 void svwsata_drv_probe(struct channel_softc *); 268 269 void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *); 270 void nforce_setup_channel(struct channel_softc *); 271 int nforce_pci_intr(void *); 272 273 void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *); 274 275 void ite_chip_map(struct pciide_softc *, struct pci_attach_args *); 276 void ite_setup_channel(struct channel_softc *); 277 278 void ixp_chip_map(struct pciide_softc *, struct pci_attach_args *); 279 void ixp_setup_channel(struct channel_softc *); 280 281 void jmicron_chip_map(struct pciide_softc *, struct pci_attach_args *); 282 void jmicron_setup_channel(struct channel_softc *); 283 284 void phison_chip_map(struct pciide_softc *, struct pci_attach_args *); 285 void phison_setup_channel(struct channel_softc *); 286 287 void sch_chip_map(struct pciide_softc *, struct pci_attach_args *); 288 void sch_setup_channel(struct channel_softc *); 289 290 struct pciide_product_desc { 291 u_int32_t ide_product; 292 u_short ide_flags; 293 /* map and setup chip, probe drives */ 294 void (*chip_map)(struct pciide_softc *, struct pci_attach_args *); 295 }; 296 297 /* Flags for ide_flags */ 298 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 299 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 300 301 /* Default product description for devices not known from this controller */ 302 const struct pciide_product_desc default_product_desc = { 303 0, /* Generic PCI IDE controller */ 304 0, 305 default_chip_map 306 }; 307 308 const struct pciide_product_desc pciide_intel_products[] = { 309 { PCI_PRODUCT_INTEL_31244, /* Intel 31244 SATA */ 310 0, 311 artisea_chip_map 312 }, 313 { PCI_PRODUCT_INTEL_82092AA, /* Intel 82092AA IDE */ 314 0, 315 default_chip_map 316 }, 317 { PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */ 318 
0, 319 piix_chip_map 320 }, 321 { PCI_PRODUCT_INTEL_82371FB_ISA, /* Intel 82371FB IDE (PIIX) */ 322 0, 323 piix_chip_map 324 }, 325 { PCI_PRODUCT_INTEL_82372FB_IDE, /* Intel 82372FB IDE (PIIX4) */ 326 0, 327 piix_chip_map 328 }, 329 { PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */ 330 0, 331 piix_chip_map 332 }, 333 { PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */ 334 0, 335 piix_chip_map 336 }, 337 { PCI_PRODUCT_INTEL_82371MX, /* Intel 82371MX IDE */ 338 0, 339 piix_chip_map 340 }, 341 { PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */ 342 0, 343 piix_chip_map 344 }, 345 { PCI_PRODUCT_INTEL_82451NX, /* Intel 82451NX (PIIX4) IDE */ 346 0, 347 piix_chip_map 348 }, 349 { PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */ 350 0, 351 piix_chip_map 352 }, 353 { PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */ 354 0, 355 piix_chip_map 356 }, 357 { PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */ 358 0, 359 piix_chip_map 360 }, 361 { PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */ 362 0, 363 piix_chip_map 364 }, 365 { PCI_PRODUCT_INTEL_82801CAM_IDE, /* Intel 82801CAM IDE (ICH3) */ 366 0, 367 piix_chip_map 368 }, 369 { PCI_PRODUCT_INTEL_82801CA_IDE, /* Intel 82801CA IDE (ICH3) */ 370 0, 371 piix_chip_map 372 }, 373 { PCI_PRODUCT_INTEL_82801DB_IDE, /* Intel 82801DB IDE (ICH4) */ 374 0, 375 piix_chip_map 376 }, 377 { PCI_PRODUCT_INTEL_82801DBL_IDE, /* Intel 82801DBL IDE (ICH4-L) */ 378 0, 379 piix_chip_map 380 }, 381 { PCI_PRODUCT_INTEL_82801DBM_IDE, /* Intel 82801DBM IDE (ICH4-M) */ 382 0, 383 piix_chip_map 384 }, 385 { PCI_PRODUCT_INTEL_82801EB_IDE, /* Intel 82801EB/ER (ICH5/5R) IDE */ 386 0, 387 piix_chip_map 388 }, 389 { PCI_PRODUCT_INTEL_82801EB_SATA, /* Intel 82801EB (ICH5) SATA */ 390 0, 391 piixsata_chip_map 392 }, 393 { PCI_PRODUCT_INTEL_82801ER_SATA, /* Intel 82801ER (ICH5R) SATA */ 394 0, 395 piixsata_chip_map 396 }, 397 { PCI_PRODUCT_INTEL_6300ESB_IDE, /* Intel 6300ESB IDE */ 398 0, 399 piix_chip_map 400 }, 401 { PCI_PRODUCT_INTEL_6300ESB_SATA, /* Intel 6300ESB SATA */ 402 0, 403 piixsata_chip_map 404 }, 405 { PCI_PRODUCT_INTEL_6300ESB_SATA2, /* Intel 6300ESB SATA */ 406 0, 407 piixsata_chip_map 408 }, 409 { PCI_PRODUCT_INTEL_6321ESB_IDE, /* Intel 6321ESB IDE */ 410 0, 411 piix_chip_map 412 }, 413 { PCI_PRODUCT_INTEL_82801FB_IDE, /* Intel 82801FB (ICH6) IDE */ 414 0, 415 piix_chip_map 416 }, 417 { PCI_PRODUCT_INTEL_82801FBM_SATA, /* Intel 82801FBM (ICH6M) SATA */ 418 0, 419 piixsata_chip_map 420 }, 421 { PCI_PRODUCT_INTEL_82801FB_SATA, /* Intel 82801FB (ICH6) SATA */ 422 0, 423 piixsata_chip_map 424 }, 425 { PCI_PRODUCT_INTEL_82801FR_SATA, /* Intel 82801FR (ICH6R) SATA */ 426 0, 427 piixsata_chip_map 428 }, 429 { PCI_PRODUCT_INTEL_82801GB_IDE, /* Intel 82801GB (ICH7) IDE */ 430 0, 431 piix_chip_map 432 }, 433 { PCI_PRODUCT_INTEL_82801GB_SATA, /* Intel 82801GB (ICH7) SATA */ 434 0, 435 piixsata_chip_map 436 }, 437 { PCI_PRODUCT_INTEL_82801GR_AHCI, /* Intel 82801GR (ICH7R) AHCI */ 438 0, 439 piixsata_chip_map 440 }, 441 { PCI_PRODUCT_INTEL_82801GR_RAID, /* Intel 82801GR (ICH7R) RAID */ 442 0, 443 piixsata_chip_map 444 }, 445 { PCI_PRODUCT_INTEL_82801GBM_SATA, /* Intel 82801GBM (ICH7M) SATA */ 446 0, 447 piixsata_chip_map 448 }, 449 { PCI_PRODUCT_INTEL_82801GBM_AHCI, /* Intel 82801GBM (ICH7M) AHCI */ 450 0, 451 piixsata_chip_map 452 }, 453 { PCI_PRODUCT_INTEL_82801GHM_RAID, /* Intel 82801GHM (ICH7M DH) RAID */ 454 0, 455 piixsata_chip_map 456 }, 457 { PCI_PRODUCT_INTEL_82801H_SATA_1, /* Intel 82801H 
(ICH8) SATA */ 458 0, 459 piixsata_chip_map 460 }, 461 { PCI_PRODUCT_INTEL_82801H_AHCI_6P, /* Intel 82801H (ICH8) AHCI */ 462 0, 463 piixsata_chip_map 464 }, 465 { PCI_PRODUCT_INTEL_82801H_RAID, /* Intel 82801H (ICH8) RAID */ 466 0, 467 piixsata_chip_map 468 }, 469 { PCI_PRODUCT_INTEL_82801H_AHCI_4P, /* Intel 82801H (ICH8) AHCI */ 470 0, 471 piixsata_chip_map 472 }, 473 { PCI_PRODUCT_INTEL_82801H_SATA_2, /* Intel 82801H (ICH8) SATA */ 474 0, 475 piixsata_chip_map 476 }, 477 { PCI_PRODUCT_INTEL_82801HBM_SATA, /* Intel 82801HBM (ICH8M) SATA */ 478 0, 479 piixsata_chip_map 480 }, 481 { PCI_PRODUCT_INTEL_82801HBM_AHCI, /* Intel 82801HBM (ICH8M) AHCI */ 482 0, 483 piixsata_chip_map 484 }, 485 { PCI_PRODUCT_INTEL_82801HBM_RAID, /* Intel 82801HBM (ICH8M) RAID */ 486 0, 487 piixsata_chip_map 488 }, 489 { PCI_PRODUCT_INTEL_82801HBM_IDE, /* Intel 82801HBM (ICH8M) IDE */ 490 0, 491 piix_chip_map 492 }, 493 { PCI_PRODUCT_INTEL_82801I_SATA_1, /* Intel 82801I (ICH9) SATA */ 494 0, 495 piixsata_chip_map 496 }, 497 { PCI_PRODUCT_INTEL_82801I_SATA_2, /* Intel 82801I (ICH9) SATA */ 498 0, 499 piixsata_chip_map 500 }, 501 { PCI_PRODUCT_INTEL_82801I_SATA_3, /* Intel 82801I (ICH9) SATA */ 502 0, 503 piixsata_chip_map 504 }, 505 { PCI_PRODUCT_INTEL_82801I_SATA_4, /* Intel 82801I (ICH9) SATA */ 506 0, 507 piixsata_chip_map 508 }, 509 { PCI_PRODUCT_INTEL_82801I_SATA_5, /* Intel 82801I (ICH9M) SATA */ 510 0, 511 piixsata_chip_map 512 }, 513 { PCI_PRODUCT_INTEL_82801I_SATA_6, /* Intel 82801I (ICH9M) SATA */ 514 0, 515 piixsata_chip_map 516 }, 517 { PCI_PRODUCT_INTEL_82801JD_SATA_1, /* Intel 82801JD (ICH10) SATA */ 518 0, 519 piixsata_chip_map 520 }, 521 { PCI_PRODUCT_INTEL_82801JD_SATA_2, /* Intel 82801JD (ICH10) SATA */ 522 0, 523 piixsata_chip_map 524 }, 525 { PCI_PRODUCT_INTEL_82801JI_SATA_1, /* Intel 82801JI (ICH10) SATA */ 526 0, 527 piixsata_chip_map 528 }, 529 { PCI_PRODUCT_INTEL_82801JI_SATA_2, /* Intel 82801JI (ICH10) SATA */ 530 0, 531 piixsata_chip_map 532 }, 533 { PCI_PRODUCT_INTEL_6321ESB_SATA, /* Intel 6321ESB SATA */ 534 0, 535 piixsata_chip_map 536 }, 537 { PCI_PRODUCT_INTEL_SCH_IDE, /* Intel SCH IDE */ 538 0, 539 sch_chip_map 540 } 541 }; 542 543 const struct pciide_product_desc pciide_amd_products[] = { 544 { PCI_PRODUCT_AMD_PBC756_IDE, /* AMD 756 */ 545 0, 546 amd756_chip_map 547 }, 548 { PCI_PRODUCT_AMD_766_IDE, /* AMD 766 */ 549 0, 550 amd756_chip_map 551 }, 552 { PCI_PRODUCT_AMD_PBC768_IDE, 553 0, 554 amd756_chip_map 555 }, 556 { PCI_PRODUCT_AMD_8111_IDE, 557 0, 558 amd756_chip_map 559 }, 560 { PCI_PRODUCT_AMD_CS5536_IDE, 561 0, 562 amd756_chip_map 563 } 564 }; 565 566 #ifdef notyet 567 const struct pciide_product_desc pciide_opti_products[] = { 568 569 { PCI_PRODUCT_OPTI_82C621, 570 0, 571 opti_chip_map 572 }, 573 { PCI_PRODUCT_OPTI_82C568, 574 0, 575 opti_chip_map 576 }, 577 { PCI_PRODUCT_OPTI_82D568, 578 0, 579 opti_chip_map 580 } 581 }; 582 #endif 583 584 const struct pciide_product_desc pciide_cmd_products[] = { 585 { PCI_PRODUCT_CMDTECH_640, /* CMD Technology PCI0640 */ 586 0, 587 cmd_chip_map 588 }, 589 { PCI_PRODUCT_CMDTECH_643, /* CMD Technology PCI0643 */ 590 0, 591 cmd0643_9_chip_map 592 }, 593 { PCI_PRODUCT_CMDTECH_646, /* CMD Technology PCI0646 */ 594 0, 595 cmd0643_9_chip_map 596 }, 597 { PCI_PRODUCT_CMDTECH_648, /* CMD Technology PCI0648 */ 598 0, 599 cmd0643_9_chip_map 600 }, 601 { PCI_PRODUCT_CMDTECH_649, /* CMD Technology PCI0649 */ 602 0, 603 cmd0643_9_chip_map 604 }, 605 { PCI_PRODUCT_CMDTECH_680, /* CMD Technology PCI0680 */ 606 IDE_PCI_CLASS_OVERRIDE, 607 cmd680_chip_map 
608 }, 609 { PCI_PRODUCT_CMDTECH_3112, /* SiI3112 SATA */ 610 0, 611 sii3112_chip_map 612 }, 613 { PCI_PRODUCT_CMDTECH_3512, /* SiI3512 SATA */ 614 0, 615 sii3112_chip_map 616 }, 617 { PCI_PRODUCT_CMDTECH_AAR_1210SA, /* Adaptec AAR-1210SA */ 618 0, 619 sii3112_chip_map 620 }, 621 { PCI_PRODUCT_CMDTECH_3114, /* SiI3114 SATA */ 622 0, 623 sii3114_chip_map 624 } 625 }; 626 627 const struct pciide_product_desc pciide_via_products[] = { 628 { PCI_PRODUCT_VIATECH_VT82C416, /* VIA VT82C416 IDE */ 629 0, 630 apollo_chip_map 631 }, 632 { PCI_PRODUCT_VIATECH_VT82C571, /* VIA VT82C571 IDE */ 633 0, 634 apollo_chip_map 635 }, 636 { PCI_PRODUCT_VIATECH_VT6410, /* VIA VT6410 IDE */ 637 IDE_PCI_CLASS_OVERRIDE, 638 apollo_chip_map 639 }, 640 { PCI_PRODUCT_VIATECH_CX700_IDE, /* VIA CX700 IDE */ 641 0, 642 apollo_chip_map 643 }, 644 { PCI_PRODUCT_VIATECH_VX700_IDE, /* VIA VX700 IDE */ 645 0, 646 apollo_chip_map 647 }, 648 { PCI_PRODUCT_VIATECH_VX855_IDE, /* VIA VX855 IDE */ 649 0, 650 apollo_chip_map 651 }, 652 { PCI_PRODUCT_VIATECH_VT6420_SATA, /* VIA VT6420 SATA */ 653 0, 654 sata_chip_map 655 }, 656 { PCI_PRODUCT_VIATECH_VT6421_SATA, /* VIA VT6421 SATA */ 657 0, 658 sata_chip_map 659 }, 660 { PCI_PRODUCT_VIATECH_VT8237A_SATA, /* VIA VT8237A SATA */ 661 0, 662 sata_chip_map 663 }, 664 { PCI_PRODUCT_VIATECH_VT8237A_SATA_2, /* VIA VT8237A SATA */ 665 0, 666 sata_chip_map 667 }, 668 { PCI_PRODUCT_VIATECH_VT8237S_SATA, /* VIA VT8237S SATA */ 669 0, 670 sata_chip_map 671 }, 672 { PCI_PRODUCT_VIATECH_VT8251_SATA, /* VIA VT8251 SATA */ 673 0, 674 sata_chip_map 675 } 676 }; 677 678 const struct pciide_product_desc pciide_cypress_products[] = { 679 { PCI_PRODUCT_CONTAQ_82C693, /* Contaq CY82C693 IDE */ 680 IDE_16BIT_IOSPACE, 681 cy693_chip_map 682 } 683 }; 684 685 const struct pciide_product_desc pciide_sis_products[] = { 686 { PCI_PRODUCT_SIS_5513, /* SIS 5513 EIDE */ 687 0, 688 sis_chip_map 689 }, 690 { PCI_PRODUCT_SIS_180, /* SIS 180 SATA */ 691 0, 692 sata_chip_map 693 }, 694 { PCI_PRODUCT_SIS_181, /* SIS 181 SATA */ 695 0, 696 sata_chip_map 697 }, 698 { PCI_PRODUCT_SIS_182, /* SIS 182 SATA */ 699 0, 700 sata_chip_map 701 } 702 }; 703 704 /* 705 * The National/AMD CS5535 requires MSRs to set DMA/PIO modes so it 706 * has been banished to the MD i386 pciide_machdep 707 */ 708 const struct pciide_product_desc pciide_natsemi_products[] = { 709 #ifdef __i386__ 710 { PCI_PRODUCT_NS_CS5535_IDE, /* National/AMD CS5535 IDE */ 711 0, 712 gcsc_chip_map 713 }, 714 #endif 715 { PCI_PRODUCT_NS_PC87415, /* National Semi PC87415 IDE */ 716 0, 717 natsemi_chip_map 718 }, 719 { PCI_PRODUCT_NS_SCx200_IDE, /* National Semi SCx200 IDE */ 720 0, 721 ns_scx200_chip_map 722 } 723 }; 724 725 const struct pciide_product_desc pciide_acer_products[] = { 726 { PCI_PRODUCT_ALI_M5229, /* Acer Labs M5229 UDMA IDE */ 727 0, 728 acer_chip_map 729 } 730 }; 731 732 const struct pciide_product_desc pciide_triones_products[] = { 733 { PCI_PRODUCT_TRIONES_HPT366, /* Highpoint HPT36x/37x IDE */ 734 IDE_PCI_CLASS_OVERRIDE, 735 hpt_chip_map, 736 }, 737 { PCI_PRODUCT_TRIONES_HPT372A, /* Highpoint HPT372A IDE */ 738 IDE_PCI_CLASS_OVERRIDE, 739 hpt_chip_map 740 }, 741 { PCI_PRODUCT_TRIONES_HPT302, /* Highpoint HPT302 IDE */ 742 IDE_PCI_CLASS_OVERRIDE, 743 hpt_chip_map 744 }, 745 { PCI_PRODUCT_TRIONES_HPT371, /* Highpoint HPT371 IDE */ 746 IDE_PCI_CLASS_OVERRIDE, 747 hpt_chip_map 748 }, 749 { PCI_PRODUCT_TRIONES_HPT374, /* Highpoint HPT374 IDE */ 750 IDE_PCI_CLASS_OVERRIDE, 751 hpt_chip_map 752 } 753 }; 754 755 const struct pciide_product_desc 
pciide_promise_products[] = { 756 { PCI_PRODUCT_PROMISE_PDC20246, 757 IDE_PCI_CLASS_OVERRIDE, 758 pdc202xx_chip_map, 759 }, 760 { PCI_PRODUCT_PROMISE_PDC20262, 761 IDE_PCI_CLASS_OVERRIDE, 762 pdc202xx_chip_map, 763 }, 764 { PCI_PRODUCT_PROMISE_PDC20265, 765 IDE_PCI_CLASS_OVERRIDE, 766 pdc202xx_chip_map, 767 }, 768 { PCI_PRODUCT_PROMISE_PDC20267, 769 IDE_PCI_CLASS_OVERRIDE, 770 pdc202xx_chip_map, 771 }, 772 { PCI_PRODUCT_PROMISE_PDC20268, 773 IDE_PCI_CLASS_OVERRIDE, 774 pdc202xx_chip_map, 775 }, 776 { PCI_PRODUCT_PROMISE_PDC20268R, 777 IDE_PCI_CLASS_OVERRIDE, 778 pdc202xx_chip_map, 779 }, 780 { PCI_PRODUCT_PROMISE_PDC20269, 781 IDE_PCI_CLASS_OVERRIDE, 782 pdc202xx_chip_map, 783 }, 784 { PCI_PRODUCT_PROMISE_PDC20271, 785 IDE_PCI_CLASS_OVERRIDE, 786 pdc202xx_chip_map, 787 }, 788 { PCI_PRODUCT_PROMISE_PDC20275, 789 IDE_PCI_CLASS_OVERRIDE, 790 pdc202xx_chip_map, 791 }, 792 { PCI_PRODUCT_PROMISE_PDC20276, 793 IDE_PCI_CLASS_OVERRIDE, 794 pdc202xx_chip_map, 795 }, 796 { PCI_PRODUCT_PROMISE_PDC20277, 797 IDE_PCI_CLASS_OVERRIDE, 798 pdc202xx_chip_map, 799 }, 800 { PCI_PRODUCT_PROMISE_PDC20318, 801 IDE_PCI_CLASS_OVERRIDE, 802 pdcsata_chip_map, 803 }, 804 { PCI_PRODUCT_PROMISE_PDC20319, 805 IDE_PCI_CLASS_OVERRIDE, 806 pdcsata_chip_map, 807 }, 808 { PCI_PRODUCT_PROMISE_PDC20371, 809 IDE_PCI_CLASS_OVERRIDE, 810 pdcsata_chip_map, 811 }, 812 { PCI_PRODUCT_PROMISE_PDC20375, 813 IDE_PCI_CLASS_OVERRIDE, 814 pdcsata_chip_map, 815 }, 816 { PCI_PRODUCT_PROMISE_PDC20376, 817 IDE_PCI_CLASS_OVERRIDE, 818 pdcsata_chip_map, 819 }, 820 { PCI_PRODUCT_PROMISE_PDC20377, 821 IDE_PCI_CLASS_OVERRIDE, 822 pdcsata_chip_map, 823 }, 824 { PCI_PRODUCT_PROMISE_PDC20378, 825 IDE_PCI_CLASS_OVERRIDE, 826 pdcsata_chip_map, 827 }, 828 { PCI_PRODUCT_PROMISE_PDC20379, 829 IDE_PCI_CLASS_OVERRIDE, 830 pdcsata_chip_map, 831 }, 832 { PCI_PRODUCT_PROMISE_PDC40518, 833 IDE_PCI_CLASS_OVERRIDE, 834 pdcsata_chip_map, 835 }, 836 { PCI_PRODUCT_PROMISE_PDC40519, 837 IDE_PCI_CLASS_OVERRIDE, 838 pdcsata_chip_map, 839 }, 840 { PCI_PRODUCT_PROMISE_PDC40718, 841 IDE_PCI_CLASS_OVERRIDE, 842 pdcsata_chip_map, 843 }, 844 { PCI_PRODUCT_PROMISE_PDC40719, 845 IDE_PCI_CLASS_OVERRIDE, 846 pdcsata_chip_map, 847 }, 848 { PCI_PRODUCT_PROMISE_PDC40779, 849 IDE_PCI_CLASS_OVERRIDE, 850 pdcsata_chip_map, 851 }, 852 { PCI_PRODUCT_PROMISE_PDC20571, 853 IDE_PCI_CLASS_OVERRIDE, 854 pdcsata_chip_map, 855 }, 856 { PCI_PRODUCT_PROMISE_PDC20575, 857 IDE_PCI_CLASS_OVERRIDE, 858 pdcsata_chip_map, 859 }, 860 { PCI_PRODUCT_PROMISE_PDC20579, 861 IDE_PCI_CLASS_OVERRIDE, 862 pdcsata_chip_map, 863 }, 864 { PCI_PRODUCT_PROMISE_PDC20771, 865 IDE_PCI_CLASS_OVERRIDE, 866 pdcsata_chip_map, 867 }, 868 { PCI_PRODUCT_PROMISE_PDC20775, 869 IDE_PCI_CLASS_OVERRIDE, 870 pdcsata_chip_map, 871 } 872 }; 873 874 const struct pciide_product_desc pciide_acard_products[] = { 875 { PCI_PRODUCT_ACARD_ATP850U, /* Acard ATP850U Ultra33 Controller */ 876 IDE_PCI_CLASS_OVERRIDE, 877 acard_chip_map, 878 }, 879 { PCI_PRODUCT_ACARD_ATP860, /* Acard ATP860 Ultra66 Controller */ 880 IDE_PCI_CLASS_OVERRIDE, 881 acard_chip_map, 882 }, 883 { PCI_PRODUCT_ACARD_ATP860A, /* Acard ATP860-A Ultra66 Controller */ 884 IDE_PCI_CLASS_OVERRIDE, 885 acard_chip_map, 886 }, 887 { PCI_PRODUCT_ACARD_ATP865A, /* Acard ATP865-A Ultra133 Controller */ 888 IDE_PCI_CLASS_OVERRIDE, 889 acard_chip_map, 890 }, 891 { PCI_PRODUCT_ACARD_ATP865R, /* Acard ATP865-R Ultra133 Controller */ 892 IDE_PCI_CLASS_OVERRIDE, 893 acard_chip_map, 894 } 895 }; 896 897 const struct pciide_product_desc pciide_serverworks_products[] = { 898 { 
PCI_PRODUCT_RCC_OSB4_IDE, 899 0, 900 serverworks_chip_map, 901 }, 902 { PCI_PRODUCT_RCC_CSB5_IDE, 903 0, 904 serverworks_chip_map, 905 }, 906 { PCI_PRODUCT_RCC_CSB6_IDE, 907 0, 908 serverworks_chip_map, 909 }, 910 { PCI_PRODUCT_RCC_CSB6_RAID_IDE, 911 0, 912 serverworks_chip_map, 913 }, 914 { PCI_PRODUCT_RCC_HT_1000_IDE, 915 0, 916 serverworks_chip_map, 917 }, 918 { PCI_PRODUCT_RCC_K2_SATA, 919 0, 920 svwsata_chip_map, 921 }, 922 { PCI_PRODUCT_RCC_FRODO4_SATA, 923 0, 924 svwsata_chip_map, 925 }, 926 { PCI_PRODUCT_RCC_FRODO8_SATA, 927 0, 928 svwsata_chip_map, 929 }, 930 { PCI_PRODUCT_RCC_HT_1000_SATA_1, 931 0, 932 svwsata_chip_map, 933 }, 934 { PCI_PRODUCT_RCC_HT_1000_SATA_2, 935 0, 936 svwsata_chip_map, 937 } 938 }; 939 940 const struct pciide_product_desc pciide_nvidia_products[] = { 941 { PCI_PRODUCT_NVIDIA_NFORCE_IDE, 942 0, 943 nforce_chip_map 944 }, 945 { PCI_PRODUCT_NVIDIA_NFORCE2_IDE, 946 0, 947 nforce_chip_map 948 }, 949 { PCI_PRODUCT_NVIDIA_NFORCE2_400_IDE, 950 0, 951 nforce_chip_map 952 }, 953 { PCI_PRODUCT_NVIDIA_NFORCE3_IDE, 954 0, 955 nforce_chip_map 956 }, 957 { PCI_PRODUCT_NVIDIA_NFORCE3_250_IDE, 958 0, 959 nforce_chip_map 960 }, 961 { PCI_PRODUCT_NVIDIA_NFORCE4_ATA133, 962 0, 963 nforce_chip_map 964 }, 965 { PCI_PRODUCT_NVIDIA_MCP04_IDE, 966 0, 967 nforce_chip_map 968 }, 969 { PCI_PRODUCT_NVIDIA_MCP51_IDE, 970 0, 971 nforce_chip_map 972 }, 973 { PCI_PRODUCT_NVIDIA_MCP55_IDE, 974 0, 975 nforce_chip_map 976 }, 977 { PCI_PRODUCT_NVIDIA_MCP61_IDE, 978 0, 979 nforce_chip_map 980 }, 981 { PCI_PRODUCT_NVIDIA_MCP65_IDE, 982 0, 983 nforce_chip_map 984 }, 985 { PCI_PRODUCT_NVIDIA_MCP67_IDE, 986 0, 987 nforce_chip_map 988 }, 989 { PCI_PRODUCT_NVIDIA_MCP73_IDE, 990 0, 991 nforce_chip_map 992 }, 993 { PCI_PRODUCT_NVIDIA_MCP77_IDE, 994 0, 995 nforce_chip_map 996 }, 997 { PCI_PRODUCT_NVIDIA_NFORCE2_400_SATA, 998 0, 999 sata_chip_map 1000 }, 1001 { PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA, 1002 0, 1003 sata_chip_map 1004 }, 1005 { PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA2, 1006 0, 1007 sata_chip_map 1008 }, 1009 { PCI_PRODUCT_NVIDIA_NFORCE4_SATA1, 1010 0, 1011 sata_chip_map 1012 }, 1013 { PCI_PRODUCT_NVIDIA_NFORCE4_SATA2, 1014 0, 1015 sata_chip_map 1016 }, 1017 { PCI_PRODUCT_NVIDIA_MCP04_SATA, 1018 0, 1019 sata_chip_map 1020 }, 1021 { PCI_PRODUCT_NVIDIA_MCP04_SATA2, 1022 0, 1023 sata_chip_map 1024 }, 1025 { PCI_PRODUCT_NVIDIA_MCP51_SATA, 1026 0, 1027 sata_chip_map 1028 }, 1029 { PCI_PRODUCT_NVIDIA_MCP51_SATA2, 1030 0, 1031 sata_chip_map 1032 }, 1033 { PCI_PRODUCT_NVIDIA_MCP55_SATA, 1034 0, 1035 sata_chip_map 1036 }, 1037 { PCI_PRODUCT_NVIDIA_MCP55_SATA2, 1038 0, 1039 sata_chip_map 1040 }, 1041 { PCI_PRODUCT_NVIDIA_MCP61_SATA, 1042 0, 1043 sata_chip_map 1044 }, 1045 { PCI_PRODUCT_NVIDIA_MCP61_SATA2, 1046 0, 1047 sata_chip_map 1048 }, 1049 { PCI_PRODUCT_NVIDIA_MCP61_SATA3, 1050 0, 1051 sata_chip_map 1052 }, 1053 { PCI_PRODUCT_NVIDIA_MCP65_SATA, 1054 0, 1055 sata_chip_map 1056 }, 1057 { PCI_PRODUCT_NVIDIA_MCP65_SATA2, 1058 0, 1059 sata_chip_map 1060 }, 1061 { PCI_PRODUCT_NVIDIA_MCP65_SATA3, 1062 0, 1063 sata_chip_map 1064 }, 1065 { PCI_PRODUCT_NVIDIA_MCP65_SATA4, 1066 0, 1067 sata_chip_map 1068 }, 1069 { PCI_PRODUCT_NVIDIA_MCP67_SATA, 1070 0, 1071 sata_chip_map 1072 }, 1073 { PCI_PRODUCT_NVIDIA_MCP67_SATA2, 1074 0, 1075 sata_chip_map 1076 }, 1077 { PCI_PRODUCT_NVIDIA_MCP67_SATA3, 1078 0, 1079 sata_chip_map 1080 }, 1081 { PCI_PRODUCT_NVIDIA_MCP67_SATA4, 1082 0, 1083 sata_chip_map 1084 }, 1085 { PCI_PRODUCT_NVIDIA_MCP79_SATA_1, 1086 0, 1087 sata_chip_map 1088 }, 1089 { PCI_PRODUCT_NVIDIA_MCP79_SATA_2, 
1090 0, 1091 sata_chip_map 1092 }, 1093 { PCI_PRODUCT_NVIDIA_MCP79_SATA_3, 1094 0, 1095 sata_chip_map 1096 }, 1097 { PCI_PRODUCT_NVIDIA_MCP79_SATA_4, 1098 0, 1099 sata_chip_map 1100 } 1101 }; 1102 1103 const struct pciide_product_desc pciide_ite_products[] = { 1104 { PCI_PRODUCT_ITEXPRESS_IT8211F, 1105 IDE_PCI_CLASS_OVERRIDE, 1106 ite_chip_map 1107 }, 1108 { PCI_PRODUCT_ITEXPRESS_IT8212F, 1109 IDE_PCI_CLASS_OVERRIDE, 1110 ite_chip_map 1111 } 1112 }; 1113 1114 const struct pciide_product_desc pciide_ati_products[] = { 1115 { PCI_PRODUCT_ATI_SB200_IDE, 1116 0, 1117 ixp_chip_map 1118 }, 1119 { PCI_PRODUCT_ATI_SB300_IDE, 1120 0, 1121 ixp_chip_map 1122 }, 1123 { PCI_PRODUCT_ATI_SB400_IDE, 1124 0, 1125 ixp_chip_map 1126 }, 1127 { PCI_PRODUCT_ATI_SB600_IDE, 1128 0, 1129 ixp_chip_map 1130 }, 1131 { PCI_PRODUCT_ATI_SB700_IDE, 1132 0, 1133 ixp_chip_map 1134 }, 1135 { PCI_PRODUCT_ATI_SB300_SATA, 1136 0, 1137 sii3112_chip_map 1138 }, 1139 { PCI_PRODUCT_ATI_SB400_SATA_1, 1140 0, 1141 sii3112_chip_map 1142 }, 1143 { PCI_PRODUCT_ATI_SB400_SATA_2, 1144 0, 1145 sii3112_chip_map 1146 } 1147 }; 1148 1149 const struct pciide_product_desc pciide_jmicron_products[] = { 1150 { PCI_PRODUCT_JMICRON_JMB361, 1151 0, 1152 jmicron_chip_map 1153 }, 1154 { PCI_PRODUCT_JMICRON_JMB363, 1155 0, 1156 jmicron_chip_map 1157 }, 1158 { PCI_PRODUCT_JMICRON_JMB365, 1159 0, 1160 jmicron_chip_map 1161 }, 1162 { PCI_PRODUCT_JMICRON_JMB366, 1163 0, 1164 jmicron_chip_map 1165 }, 1166 { PCI_PRODUCT_JMICRON_JMB368, 1167 0, 1168 jmicron_chip_map 1169 } 1170 }; 1171 1172 const struct pciide_product_desc pciide_phison_products[] = { 1173 { PCI_PRODUCT_PHISON_PS5000, 1174 0, 1175 phison_chip_map 1176 }, 1177 }; 1178 1179 struct pciide_vendor_desc { 1180 u_int32_t ide_vendor; 1181 const struct pciide_product_desc *ide_products; 1182 int ide_nproducts; 1183 }; 1184 1185 const struct pciide_vendor_desc pciide_vendors[] = { 1186 { PCI_VENDOR_INTEL, pciide_intel_products, 1187 sizeof(pciide_intel_products)/sizeof(pciide_intel_products[0]) }, 1188 { PCI_VENDOR_AMD, pciide_amd_products, 1189 sizeof(pciide_amd_products)/sizeof(pciide_amd_products[0]) }, 1190 #ifdef notyet 1191 { PCI_VENDOR_OPTI, pciide_opti_products, 1192 sizeof(pciide_opti_products)/sizeof(pciide_opti_products[0]) }, 1193 #endif 1194 { PCI_VENDOR_CMDTECH, pciide_cmd_products, 1195 sizeof(pciide_cmd_products)/sizeof(pciide_cmd_products[0]) }, 1196 { PCI_VENDOR_VIATECH, pciide_via_products, 1197 sizeof(pciide_via_products)/sizeof(pciide_via_products[0]) }, 1198 { PCI_VENDOR_CONTAQ, pciide_cypress_products, 1199 sizeof(pciide_cypress_products)/sizeof(pciide_cypress_products[0]) }, 1200 { PCI_VENDOR_SIS, pciide_sis_products, 1201 sizeof(pciide_sis_products)/sizeof(pciide_sis_products[0]) }, 1202 { PCI_VENDOR_NS, pciide_natsemi_products, 1203 sizeof(pciide_natsemi_products)/sizeof(pciide_natsemi_products[0]) }, 1204 { PCI_VENDOR_ALI, pciide_acer_products, 1205 sizeof(pciide_acer_products)/sizeof(pciide_acer_products[0]) }, 1206 { PCI_VENDOR_TRIONES, pciide_triones_products, 1207 sizeof(pciide_triones_products)/sizeof(pciide_triones_products[0]) }, 1208 { PCI_VENDOR_ACARD, pciide_acard_products, 1209 sizeof(pciide_acard_products)/sizeof(pciide_acard_products[0]) }, 1210 { PCI_VENDOR_RCC, pciide_serverworks_products, 1211 sizeof(pciide_serverworks_products)/sizeof(pciide_serverworks_products[0]) }, 1212 { PCI_VENDOR_PROMISE, pciide_promise_products, 1213 sizeof(pciide_promise_products)/sizeof(pciide_promise_products[0]) }, 1214 { PCI_VENDOR_NVIDIA, pciide_nvidia_products, 1215 
sizeof(pciide_nvidia_products)/sizeof(pciide_nvidia_products[0]) }, 1216 { PCI_VENDOR_ITEXPRESS, pciide_ite_products, 1217 sizeof(pciide_ite_products)/sizeof(pciide_ite_products[0]) }, 1218 { PCI_VENDOR_ATI, pciide_ati_products, 1219 sizeof(pciide_ati_products)/sizeof(pciide_ati_products[0]) }, 1220 { PCI_VENDOR_JMICRON, pciide_jmicron_products, 1221 sizeof(pciide_jmicron_products)/sizeof(pciide_jmicron_products[0]) }, 1222 { PCI_VENDOR_PHISON, pciide_phison_products, 1223 sizeof(pciide_phison_products)/sizeof(pciide_phison_products[0]) } 1224 }; 1225 1226 /* options passed via the 'flags' config keyword */ 1227 #define PCIIDE_OPTIONS_DMA 0x01 1228 1229 int pciide_match(struct device *, void *, void *); 1230 void pciide_attach(struct device *, struct device *, void *); 1231 int pciide_detach(struct device *, int); 1232 1233 struct cfattach pciide_pci_ca = { 1234 sizeof(struct pciide_softc), pciide_match, pciide_attach, pciide_detach, 1235 }; 1236 1237 struct cfattach pciide_jmb_ca = { 1238 sizeof(struct pciide_softc), pciide_match, pciide_attach, pciide_detach, 1239 }; 1240 1241 struct cfdriver pciide_cd = { 1242 NULL, "pciide", DV_DULL 1243 }; 1244 1245 const struct pciide_product_desc *pciide_lookup_product(u_int32_t); 1246 1247 const struct pciide_product_desc * 1248 pciide_lookup_product(u_int32_t id) 1249 { 1250 const struct pciide_product_desc *pp; 1251 const struct pciide_vendor_desc *vp; 1252 int i; 1253 1254 for (i = 0, vp = pciide_vendors; 1255 i < sizeof(pciide_vendors)/sizeof(pciide_vendors[0]); 1256 vp++, i++) 1257 if (PCI_VENDOR(id) == vp->ide_vendor) 1258 break; 1259 1260 if (i == sizeof(pciide_vendors)/sizeof(pciide_vendors[0])) 1261 return (NULL); 1262 1263 for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++) 1264 if (PCI_PRODUCT(id) == pp->ide_product) 1265 break; 1266 1267 if (i == vp->ide_nproducts) 1268 return (NULL); 1269 return (pp); 1270 } 1271 1272 int 1273 pciide_match(struct device *parent, void *match, void *aux) 1274 { 1275 struct pci_attach_args *pa = aux; 1276 const struct pciide_product_desc *pp; 1277 1278 /* 1279 * Some IDE controllers have severe bugs when used in PCI mode. 1280 * We punt and attach them to the ISA bus instead. 1281 */ 1282 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH && 1283 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000) 1284 return (0); 1285 1286 /* 1287 * Some controllers (e.g. promise Ultra-33) don't claim to be PCI IDE 1288 * controllers. Let see if we can deal with it anyway. 1289 */ 1290 pp = pciide_lookup_product(pa->pa_id); 1291 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) 1292 return (1); 1293 1294 /* 1295 * Check the ID register to see that it's a PCI IDE controller. 1296 * If it is, we assume that we can deal with it; it _should_ 1297 * work in a standardized way... 1298 */ 1299 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE) { 1300 switch (PCI_SUBCLASS(pa->pa_class)) { 1301 case PCI_SUBCLASS_MASS_STORAGE_IDE: 1302 return (1); 1303 1304 /* 1305 * We only match these if we know they have 1306 * a match, as we may not support native interfaces 1307 * on them. 
1308 */ 1309 case PCI_SUBCLASS_MASS_STORAGE_SATA: 1310 case PCI_SUBCLASS_MASS_STORAGE_RAID: 1311 case PCI_SUBCLASS_MASS_STORAGE_MISC: 1312 if (pp) 1313 return (1); 1314 else 1315 return (0); 1316 break; 1317 } 1318 } 1319 1320 return (0); 1321 } 1322 1323 void 1324 pciide_attach(struct device *parent, struct device *self, void *aux) 1325 { 1326 struct pciide_softc *sc = (struct pciide_softc *)self; 1327 struct pci_attach_args *pa = aux; 1328 1329 sc->sc_pp = pciide_lookup_product(pa->pa_id); 1330 if (sc->sc_pp == NULL) 1331 sc->sc_pp = &default_product_desc; 1332 sc->sc_rev = PCI_REVISION(pa->pa_class); 1333 1334 sc->sc_pc = pa->pa_pc; 1335 sc->sc_tag = pa->pa_tag; 1336 1337 /* Set up DMA defaults; these might be adjusted by chip_map. */ 1338 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX; 1339 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN; 1340 1341 sc->sc_dmacmd_read = pciide_dmacmd_read; 1342 sc->sc_dmacmd_write = pciide_dmacmd_write; 1343 sc->sc_dmactl_read = pciide_dmactl_read; 1344 sc->sc_dmactl_write = pciide_dmactl_write; 1345 sc->sc_dmatbl_write = pciide_dmatbl_write; 1346 1347 WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc, 1348 sc->sc_tag, pa->pa_class), DEBUG_PROBE); 1349 1350 sc->sc_pp->chip_map(sc, pa); 1351 1352 WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n", 1353 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG)), 1354 DEBUG_PROBE); 1355 } 1356 1357 int 1358 pciide_detach(struct device *self, int flags) 1359 { 1360 struct pciide_softc *sc = (struct pciide_softc *)self; 1361 if (sc->chip_unmap == NULL) 1362 panic("unmap not yet implemented for this chipset"); 1363 else 1364 sc->chip_unmap(sc, flags); 1365 1366 return 0; 1367 } 1368 1369 int 1370 pciide_mapregs_compat(struct pci_attach_args *pa, struct pciide_channel *cp, 1371 int compatchan, bus_size_t *cmdsizep, bus_size_t *ctlsizep) 1372 { 1373 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1374 struct channel_softc *wdc_cp = &cp->wdc_channel; 1375 pcireg_t csr; 1376 1377 cp->compat = 1; 1378 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 1379 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 1380 1381 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG); 1382 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1383 csr | PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MASTER_ENABLE); 1384 1385 wdc_cp->cmd_iot = pa->pa_iot; 1386 1387 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 1388 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) { 1389 printf("%s: couldn't map %s cmd regs\n", 1390 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1391 return (0); 1392 } 1393 1394 wdc_cp->ctl_iot = pa->pa_iot; 1395 1396 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 1397 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) { 1398 printf("%s: couldn't map %s ctl regs\n", 1399 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1400 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, 1401 PCIIDE_COMPAT_CMD_SIZE); 1402 return (0); 1403 } 1404 wdc_cp->cmd_iosz = *cmdsizep; 1405 wdc_cp->ctl_iosz = *ctlsizep; 1406 1407 return (1); 1408 } 1409 1410 int 1411 pciide_unmapregs_compat(struct pciide_softc *sc, struct pciide_channel *cp) 1412 { 1413 struct channel_softc *wdc_cp = &cp->wdc_channel; 1414 1415 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz); 1416 bus_space_unmap(wdc_cp->ctl_iot, wdc_cp->cmd_ioh, wdc_cp->ctl_iosz); 1417 1418 if (sc->sc_pci_ih != NULL) { 1419 pciide_machdep_compat_intr_disestablish(sc->sc_pc, sc->sc_pci_ih); 1420 sc->sc_pci_ih 
= NULL; 1421 } 1422 1423 return (0); 1424 } 1425 1426 int 1427 pciide_mapregs_native(struct pci_attach_args *pa, struct pciide_channel *cp, 1428 bus_size_t *cmdsizep, bus_size_t *ctlsizep, int (*pci_intr)(void *)) 1429 { 1430 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1431 struct channel_softc *wdc_cp = &cp->wdc_channel; 1432 const char *intrstr; 1433 pci_intr_handle_t intrhandle; 1434 pcireg_t maptype; 1435 1436 cp->compat = 0; 1437 1438 if (sc->sc_pci_ih == NULL) { 1439 if (pci_intr_map(pa, &intrhandle) != 0) { 1440 printf("%s: couldn't map native-PCI interrupt\n", 1441 sc->sc_wdcdev.sc_dev.dv_xname); 1442 return (0); 1443 } 1444 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 1445 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 1446 intrhandle, IPL_BIO, pci_intr, sc, 1447 sc->sc_wdcdev.sc_dev.dv_xname); 1448 if (sc->sc_pci_ih != NULL) { 1449 printf("%s: using %s for native-PCI interrupt\n", 1450 sc->sc_wdcdev.sc_dev.dv_xname, 1451 intrstr ? intrstr : "unknown interrupt"); 1452 } else { 1453 printf("%s: couldn't establish native-PCI interrupt", 1454 sc->sc_wdcdev.sc_dev.dv_xname); 1455 if (intrstr != NULL) 1456 printf(" at %s", intrstr); 1457 printf("\n"); 1458 return (0); 1459 } 1460 } 1461 cp->ih = sc->sc_pci_ih; 1462 sc->sc_pc = pa->pa_pc; 1463 1464 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 1465 PCIIDE_REG_CMD_BASE(wdc_cp->channel)); 1466 WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n", 1467 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1468 (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE); 1469 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel), 1470 maptype, 0, 1471 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) { 1472 printf("%s: couldn't map %s cmd regs\n", 1473 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1474 return (0); 1475 } 1476 1477 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 1478 PCIIDE_REG_CTL_BASE(wdc_cp->channel)); 1479 WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n", 1480 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1481 (maptype == PCI_MAPREG_TYPE_IO ? "I/O": "memory")), DEBUG_PROBE); 1482 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel), 1483 maptype, 0, 1484 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) { 1485 printf("%s: couldn't map %s ctl regs\n", 1486 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1487 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 1488 return (0); 1489 } 1490 /* 1491 * In native mode, 4 bytes of I/O space are mapped for the control 1492 * register, the control register is at offset 2. Pass the generic 1493 * code a handle for only one byte at the right offset. 
1494 */ 1495 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1, 1496 &wdc_cp->ctl_ioh) != 0) { 1497 printf("%s: unable to subregion %s ctl regs\n", 1498 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1499 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 1500 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep); 1501 return (0); 1502 } 1503 wdc_cp->cmd_iosz = *cmdsizep; 1504 wdc_cp->ctl_iosz = *ctlsizep; 1505 1506 return (1); 1507 } 1508 1509 int 1510 pciide_unmapregs_native(struct pciide_softc *sc, struct pciide_channel *cp) 1511 { 1512 struct channel_softc *wdc_cp = &cp->wdc_channel; 1513 1514 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz); 1515 1516 /* Unmap the whole control space, not just the sub-region */ 1517 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, wdc_cp->ctl_iosz); 1518 1519 if (sc->sc_pci_ih != NULL) { 1520 pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih); 1521 sc->sc_pci_ih = NULL; 1522 } 1523 1524 return (0); 1525 } 1526 1527 void 1528 pciide_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 1529 { 1530 pcireg_t maptype; 1531 bus_addr_t addr; 1532 1533 /* 1534 * Map DMA registers 1535 * 1536 * Note that sc_dma_ok is the right variable to test to see if 1537 * DMA can be done. If the interface doesn't support DMA, 1538 * sc_dma_ok will never be non-zero. If the DMA regs couldn't 1539 * be mapped, it'll be zero. I.e., sc_dma_ok will only be 1540 * non-zero if the interface supports DMA and the registers 1541 * could be mapped. 1542 * 1543 * XXX Note that despite the fact that the Bus Master IDE specs 1544 * XXX say that "The bus master IDE function uses 16 bytes of IO 1545 * XXX space", some controllers (at least the United 1546 * XXX Microelectronics UM8886BF) place it in memory space. 
1547 */ 1548 1549 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 1550 PCIIDE_REG_BUS_MASTER_DMA); 1551 1552 switch (maptype) { 1553 case PCI_MAPREG_TYPE_IO: 1554 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 1555 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 1556 &addr, NULL, NULL) == 0); 1557 if (sc->sc_dma_ok == 0) { 1558 printf(", unused (couldn't query registers)"); 1559 break; 1560 } 1561 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 1562 && addr >= 0x10000) { 1563 sc->sc_dma_ok = 0; 1564 printf(", unused (registers at unsafe address %#lx)", addr); 1565 break; 1566 } 1567 /* FALLTHROUGH */ 1568 1569 case PCI_MAPREG_MEM_TYPE_32BIT: 1570 sc->sc_dma_ok = (pci_mapreg_map(pa, 1571 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 1572 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, &sc->sc_dma_iosz, 1573 0) == 0); 1574 sc->sc_dmat = pa->pa_dmat; 1575 if (sc->sc_dma_ok == 0) { 1576 printf(", unused (couldn't map registers)"); 1577 } else { 1578 sc->sc_wdcdev.dma_arg = sc; 1579 sc->sc_wdcdev.dma_init = pciide_dma_init; 1580 sc->sc_wdcdev.dma_start = pciide_dma_start; 1581 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 1582 } 1583 break; 1584 1585 default: 1586 sc->sc_dma_ok = 0; 1587 printf(", (unsupported maptype 0x%x)", maptype); 1588 break; 1589 } 1590 } 1591 1592 void 1593 pciide_unmapreg_dma(struct pciide_softc *sc) 1594 { 1595 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, sc->sc_dma_iosz); 1596 } 1597 1598 int 1599 pciide_intr_flag(struct pciide_channel *cp) 1600 { 1601 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1602 int chan = cp->wdc_channel.channel; 1603 1604 if (cp->dma_in_progress) { 1605 int retry = 10; 1606 int status; 1607 1608 /* Check the status register */ 1609 for (retry = 10; retry > 0; retry--) { 1610 status = PCIIDE_DMACTL_READ(sc, chan); 1611 if (status & IDEDMA_CTL_INTR) { 1612 break; 1613 } 1614 DELAY(5); 1615 } 1616 1617 /* Not for us. */ 1618 if (retry == 0) 1619 return (0); 1620 1621 return (1); 1622 } 1623 1624 return (-1); 1625 } 1626 1627 int 1628 pciide_compat_intr(void *arg) 1629 { 1630 struct pciide_channel *cp = arg; 1631 1632 if (pciide_intr_flag(cp) == 0) 1633 return (0); 1634 1635 #ifdef DIAGNOSTIC 1636 /* should only be called for a compat channel */ 1637 if (cp->compat == 0) 1638 panic("pciide compat intr called for non-compat chan %p", cp); 1639 #endif 1640 return (wdcintr(&cp->wdc_channel)); 1641 } 1642 1643 int 1644 pciide_pci_intr(void *arg) 1645 { 1646 struct pciide_softc *sc = arg; 1647 struct pciide_channel *cp; 1648 struct channel_softc *wdc_cp; 1649 int i, rv, crv; 1650 1651 rv = 0; 1652 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 1653 cp = &sc->pciide_channels[i]; 1654 wdc_cp = &cp->wdc_channel; 1655 1656 /* If a compat channel skip. 
*/ 1657 if (cp->compat) 1658 continue; 1659 1660 if (pciide_intr_flag(cp) == 0) 1661 continue; 1662 1663 crv = wdcintr(wdc_cp); 1664 if (crv == 0) 1665 ; /* leave rv alone */ 1666 else if (crv == 1) 1667 rv = 1; /* claim the intr */ 1668 else if (rv == 0) /* crv should be -1 in this case */ 1669 rv = crv; /* if we've done no better, take it */ 1670 } 1671 return (rv); 1672 } 1673 1674 u_int8_t 1675 pciide_dmacmd_read(struct pciide_softc *sc, int chan) 1676 { 1677 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1678 IDEDMA_CMD(chan))); 1679 } 1680 1681 void 1682 pciide_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 1683 { 1684 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1685 IDEDMA_CMD(chan), val); 1686 } 1687 1688 u_int8_t 1689 pciide_dmactl_read(struct pciide_softc *sc, int chan) 1690 { 1691 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1692 IDEDMA_CTL(chan))); 1693 } 1694 1695 void 1696 pciide_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 1697 { 1698 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1699 IDEDMA_CTL(chan), val); 1700 } 1701 1702 void 1703 pciide_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 1704 { 1705 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 1706 IDEDMA_TBL(chan), val); 1707 } 1708 1709 void 1710 pciide_channel_dma_setup(struct pciide_channel *cp) 1711 { 1712 int drive; 1713 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1714 struct ata_drive_datas *drvp; 1715 1716 for (drive = 0; drive < 2; drive++) { 1717 drvp = &cp->wdc_channel.ch_drive[drive]; 1718 /* If no drive, skip */ 1719 if ((drvp->drive_flags & DRIVE) == 0) 1720 continue; 1721 /* setup DMA if needed */ 1722 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1723 (drvp->drive_flags & DRIVE_UDMA) == 0) || 1724 sc->sc_dma_ok == 0) { 1725 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 1726 continue; 1727 } 1728 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive) 1729 != 0) { 1730 /* Abort DMA setup */ 1731 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 1732 continue; 1733 } 1734 } 1735 } 1736 1737 int 1738 pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive) 1739 { 1740 bus_dma_segment_t seg; 1741 int error, rseg; 1742 const bus_size_t dma_table_size = 1743 sizeof(struct idedma_table) * NIDEDMA_TABLES; 1744 struct pciide_dma_maps *dma_maps = 1745 &sc->pciide_channels[channel].dma_maps[drive]; 1746 1747 /* If table was already allocated, just return */ 1748 if (dma_maps->dma_table) 1749 return (0); 1750 1751 /* Allocate memory for the DMA tables and map it */ 1752 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size, 1753 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg, 1754 BUS_DMA_NOWAIT)) != 0) { 1755 printf("%s:%d: unable to allocate table DMA for " 1756 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1757 channel, drive, error); 1758 return (error); 1759 } 1760 1761 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 1762 dma_table_size, 1763 (caddr_t *)&dma_maps->dma_table, 1764 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 1765 printf("%s:%d: unable to map table DMA for" 1766 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1767 channel, drive, error); 1768 return (error); 1769 } 1770 1771 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, " 1772 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size, 1773 seg.ds_addr), DEBUG_PROBE); 1774 1775 /* Create and load table DMA map for this disk */ 1776 if ((error = bus_dmamap_create(sc->sc_dmat, 
dma_table_size, 1777 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 1778 &dma_maps->dmamap_table)) != 0) { 1779 printf("%s:%d: unable to create table DMA map for " 1780 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1781 channel, drive, error); 1782 return (error); 1783 } 1784 if ((error = bus_dmamap_load(sc->sc_dmat, 1785 dma_maps->dmamap_table, 1786 dma_maps->dma_table, 1787 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 1788 printf("%s:%d: unable to load table DMA map for " 1789 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1790 channel, drive, error); 1791 return (error); 1792 } 1793 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 1794 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE); 1795 /* Create a xfer DMA map for this drive */ 1796 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 1797 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary, 1798 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1799 &dma_maps->dmamap_xfer)) != 0) { 1800 printf("%s:%d: unable to create xfer DMA map for " 1801 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1802 channel, drive, error); 1803 return (error); 1804 } 1805 return (0); 1806 } 1807 1808 int 1809 pciide_dma_init(void *v, int channel, int drive, void *databuf, 1810 size_t datalen, int flags) 1811 { 1812 struct pciide_softc *sc = v; 1813 int error, seg; 1814 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1815 struct pciide_dma_maps *dma_maps = 1816 &sc->pciide_channels[channel].dma_maps[drive]; 1817 #ifndef BUS_DMA_RAW 1818 #define BUS_DMA_RAW 0 1819 #endif 1820 1821 error = bus_dmamap_load(sc->sc_dmat, 1822 dma_maps->dmamap_xfer, 1823 databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW); 1824 if (error) { 1825 printf("%s:%d: unable to load xfer DMA map for " 1826 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1827 channel, drive, error); 1828 return (error); 1829 } 1830 1831 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1832 dma_maps->dmamap_xfer->dm_mapsize, 1833 (flags & WDC_DMA_READ) ? 1834 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1835 1836 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 1837 #ifdef DIAGNOSTIC 1838 /* A segment must not cross a 64k boundary */ 1839 { 1840 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 1841 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 1842 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 1843 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 1844 printf("pciide_dma: segment %d physical addr 0x%lx" 1845 " len 0x%lx not properly aligned\n", 1846 seg, phys, len); 1847 panic("pciide_dma: buf align"); 1848 } 1849 } 1850 #endif 1851 dma_maps->dma_table[seg].base_addr = 1852 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 1853 dma_maps->dma_table[seg].byte_count = 1854 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 1855 IDEDMA_BYTE_COUNT_MASK); 1856 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 1857 seg, letoh32(dma_maps->dma_table[seg].byte_count), 1858 letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 1859 1860 } 1861 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 1862 htole32(IDEDMA_BYTE_COUNT_EOT); 1863 1864 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 1865 dma_maps->dmamap_table->dm_mapsize, 1866 BUS_DMASYNC_PREWRITE); 1867 1868 /* Maps are ready. 
Start DMA function */ 1869 #ifdef DIAGNOSTIC 1870 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 1871 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 1872 dma_maps->dmamap_table->dm_segs[0].ds_addr); 1873 panic("pciide_dma_init: table align"); 1874 } 1875 #endif 1876 1877 /* Clear status bits */ 1878 PCIIDE_DMACTL_WRITE(sc, channel, PCIIDE_DMACTL_READ(sc, channel)); 1879 /* Write table addr */ 1880 PCIIDE_DMATBL_WRITE(sc, channel, 1881 dma_maps->dmamap_table->dm_segs[0].ds_addr); 1882 /* set read/write */ 1883 PCIIDE_DMACMD_WRITE(sc, channel, 1884 ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd); 1885 /* remember flags */ 1886 dma_maps->dma_flags = flags; 1887 return (0); 1888 } 1889 1890 void 1891 pciide_dma_start(void *v, int channel, int drive) 1892 { 1893 struct pciide_softc *sc = v; 1894 1895 WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS); 1896 PCIIDE_DMACMD_WRITE(sc, channel, PCIIDE_DMACMD_READ(sc, channel) | 1897 IDEDMA_CMD_START); 1898 1899 sc->pciide_channels[channel].dma_in_progress = 1; 1900 } 1901 1902 int 1903 pciide_dma_finish(void *v, int channel, int drive, int force) 1904 { 1905 struct pciide_softc *sc = v; 1906 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1907 u_int8_t status; 1908 int error = 0; 1909 struct pciide_dma_maps *dma_maps = 1910 &sc->pciide_channels[channel].dma_maps[drive]; 1911 1912 status = PCIIDE_DMACTL_READ(sc, channel); 1913 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 1914 DEBUG_XFERS); 1915 1916 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) { 1917 error = WDC_DMAST_NOIRQ; 1918 goto done; 1919 } 1920 1921 /* stop DMA channel */ 1922 PCIIDE_DMACMD_WRITE(sc, channel, 1923 ((dma_maps->dma_flags & WDC_DMA_READ) ? 1924 0x00 : IDEDMA_CMD_WRITE) | cp->idedma_cmd); 1925 1926 /* Unload the map of the data buffer */ 1927 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1928 dma_maps->dmamap_xfer->dm_mapsize, 1929 (dma_maps->dma_flags & WDC_DMA_READ) ? 
1930 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1931 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 1932 1933 /* Clear status bits */ 1934 PCIIDE_DMACTL_WRITE(sc, channel, status); 1935 1936 if ((status & IDEDMA_CTL_ERR) != 0) { 1937 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 1938 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 1939 error |= WDC_DMAST_ERR; 1940 } 1941 1942 if ((status & IDEDMA_CTL_INTR) == 0) { 1943 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 1944 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 1945 drive, status); 1946 error |= WDC_DMAST_NOIRQ; 1947 } 1948 1949 if ((status & IDEDMA_CTL_ACT) != 0) { 1950 /* data underrun, may be a valid condition for ATAPI */ 1951 error |= WDC_DMAST_UNDER; 1952 } 1953 1954 done: 1955 sc->pciide_channels[channel].dma_in_progress = 0; 1956 return (error); 1957 } 1958 1959 void 1960 pciide_irqack(struct channel_softc *chp) 1961 { 1962 struct pciide_channel *cp = (struct pciide_channel *)chp; 1963 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1964 int chan = chp->channel; 1965 1966 /* clear status bits in IDE DMA registers */ 1967 PCIIDE_DMACTL_WRITE(sc, chan, PCIIDE_DMACTL_READ(sc, chan)); 1968 } 1969 1970 /* some common code used by several chip_map */ 1971 int 1972 pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface) 1973 { 1974 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1975 sc->wdc_chanarray[channel] = &cp->wdc_channel; 1976 cp->name = PCIIDE_CHANNEL_NAME(channel); 1977 cp->wdc_channel.channel = channel; 1978 cp->wdc_channel.wdc = &sc->sc_wdcdev; 1979 cp->wdc_channel.ch_queue = 1980 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 1981 if (cp->wdc_channel.ch_queue == NULL) { 1982 printf("%s: %s " 1983 "cannot allocate memory for command queue", 1984 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1985 return (0); 1986 } 1987 cp->hw_ok = 1; 1988 1989 return (1); 1990 } 1991 1992 void 1993 pciide_chanfree(struct pciide_softc *sc, int channel) 1994 { 1995 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1996 if (cp->wdc_channel.ch_queue) 1997 free(cp->wdc_channel.ch_queue, M_DEVBUF); 1998 } 1999 2000 /* some common code used by several chip channel_map */ 2001 void 2002 pciide_mapchan(struct pci_attach_args *pa, struct pciide_channel *cp, 2003 pcireg_t interface, bus_size_t *cmdsizep, bus_size_t *ctlsizep, 2004 int (*pci_intr)(void *)) 2005 { 2006 struct channel_softc *wdc_cp = &cp->wdc_channel; 2007 2008 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 2009 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 2010 pci_intr); 2011 else 2012 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2013 wdc_cp->channel, cmdsizep, ctlsizep); 2014 if (cp->hw_ok == 0) 2015 return; 2016 wdc_cp->data32iot = wdc_cp->cmd_iot; 2017 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 2018 wdcattach(wdc_cp); 2019 } 2020 2021 void 2022 pciide_unmap_chan(struct pciide_softc *sc, struct pciide_channel *cp, int flags) 2023 { 2024 struct channel_softc *wdc_cp = &cp->wdc_channel; 2025 2026 wdcdetach(wdc_cp, flags); 2027 2028 if (cp->compat != 0) 2029 pciide_unmapregs_compat(sc, cp); 2030 else 2031 pciide_unmapregs_native(sc, cp); 2032 } 2033 2034 /* 2035 * Generic code to call to know if a channel can be disabled. 
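 * A channel is considered disposable when neither of its drives answered
 * the probe; the chip-specific code then usually clears the channel's
 * enable bit in the controller's own configuration register, as the
 * PIIX, AMD756 and Apollo maps below do.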
Return 1 2036 * if channel can be disabled, 0 if not 2037 */ 2038 int 2039 pciide_chan_candisable(struct pciide_channel *cp) 2040 { 2041 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2042 struct channel_softc *wdc_cp = &cp->wdc_channel; 2043 2044 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 2045 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 2046 printf("%s: %s disabled (no drives)\n", 2047 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2048 cp->hw_ok = 0; 2049 return (1); 2050 } 2051 return (0); 2052 } 2053 2054 /* 2055 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 2056 * Set hw_ok=0 on failure 2057 */ 2058 void 2059 pciide_map_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2060 int compatchan, int interface) 2061 { 2062 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2063 struct channel_softc *wdc_cp = &cp->wdc_channel; 2064 2065 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2066 return; 2067 2068 cp->compat = 1; 2069 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 2070 pa, compatchan, pciide_compat_intr, cp); 2071 if (cp->ih == NULL) { 2072 printf("%s: no compatibility interrupt for use by %s\n", 2073 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2074 cp->hw_ok = 0; 2075 } 2076 } 2077 2078 /* 2079 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel. 2080 * Set hw_ok=0 on failure 2081 */ 2082 void 2083 pciide_unmap_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2084 int compatchan, int interface) 2085 { 2086 struct channel_softc *wdc_cp = &cp->wdc_channel; 2087 2088 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2089 return; 2090 2091 pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih); 2092 } 2093 2094 void 2095 pciide_print_channels(int nchannels, pcireg_t interface) 2096 { 2097 int i; 2098 2099 for (i = 0; i < nchannels; i++) { 2100 printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i), 2101 (interface & PCIIDE_INTERFACE_SETTABLE(i)) ? 2102 "configured" : "wired", 2103 (interface & PCIIDE_INTERFACE_PCI(i)) ? 
"native-PCI" : 2104 "compatibility"); 2105 } 2106 2107 printf("\n"); 2108 } 2109 2110 void 2111 pciide_print_modes(struct pciide_channel *cp) 2112 { 2113 wdc_print_current_modes(&cp->wdc_channel); 2114 } 2115 2116 void 2117 default_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2118 { 2119 struct pciide_channel *cp; 2120 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2121 pcireg_t csr; 2122 int channel, drive; 2123 struct ata_drive_datas *drvp; 2124 u_int8_t idedma_ctl; 2125 bus_size_t cmdsize, ctlsize; 2126 char *failreason; 2127 2128 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2129 printf(": DMA"); 2130 if (sc->sc_pp == &default_product_desc && 2131 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 2132 PCIIDE_OPTIONS_DMA) == 0) { 2133 printf(" (unsupported)"); 2134 sc->sc_dma_ok = 0; 2135 } else { 2136 pciide_mapreg_dma(sc, pa); 2137 if (sc->sc_dma_ok != 0) 2138 printf(", (partial support)"); 2139 } 2140 } else { 2141 printf(": no DMA"); 2142 sc->sc_dma_ok = 0; 2143 } 2144 if (sc->sc_dma_ok) { 2145 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2146 sc->sc_wdcdev.irqack = pciide_irqack; 2147 } 2148 sc->sc_wdcdev.PIO_cap = 0; 2149 sc->sc_wdcdev.DMA_cap = 0; 2150 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2151 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2152 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 2153 2154 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2155 2156 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2157 cp = &sc->pciide_channels[channel]; 2158 if (pciide_chansetup(sc, channel, interface) == 0) 2159 continue; 2160 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 2161 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2162 &ctlsize, pciide_pci_intr); 2163 } else { 2164 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2165 channel, &cmdsize, &ctlsize); 2166 } 2167 if (cp->hw_ok == 0) 2168 continue; 2169 /* 2170 * Check to see if something appears to be there. 2171 */ 2172 failreason = NULL; 2173 pciide_map_compat_intr(pa, cp, channel, interface); 2174 if (cp->hw_ok == 0) 2175 continue; 2176 if (!wdcprobe(&cp->wdc_channel)) { 2177 failreason = "not responding; disabled or no drives?"; 2178 goto next; 2179 } 2180 /* 2181 * Now, make sure it's actually attributable to this PCI IDE 2182 * channel by trying to access the channel again while the 2183 * PCI IDE controller's I/O space is disabled. (If the 2184 * channel no longer appears to be there, it belongs to 2185 * this controller.) YUCK! 
2186 */ 2187 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 2188 PCI_COMMAND_STATUS_REG); 2189 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 2190 csr & ~PCI_COMMAND_IO_ENABLE); 2191 if (wdcprobe(&cp->wdc_channel)) 2192 failreason = "other hardware responding at addresses"; 2193 pci_conf_write(sc->sc_pc, sc->sc_tag, 2194 PCI_COMMAND_STATUS_REG, csr); 2195 next: 2196 if (failreason) { 2197 printf("%s: %s ignored (%s)\n", 2198 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2199 failreason); 2200 cp->hw_ok = 0; 2201 pciide_unmap_compat_intr(pa, cp, channel, interface); 2202 bus_space_unmap(cp->wdc_channel.cmd_iot, 2203 cp->wdc_channel.cmd_ioh, cmdsize); 2204 if (interface & PCIIDE_INTERFACE_PCI(channel)) 2205 bus_space_unmap(cp->wdc_channel.ctl_iot, 2206 cp->ctl_baseioh, ctlsize); 2207 else 2208 bus_space_unmap(cp->wdc_channel.ctl_iot, 2209 cp->wdc_channel.ctl_ioh, ctlsize); 2210 } 2211 if (cp->hw_ok) { 2212 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2213 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2214 wdcattach(&cp->wdc_channel); 2215 } 2216 } 2217 2218 if (sc->sc_dma_ok == 0) 2219 return; 2220 2221 /* Allocate DMA maps */ 2222 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2223 idedma_ctl = 0; 2224 cp = &sc->pciide_channels[channel]; 2225 for (drive = 0; drive < 2; drive++) { 2226 drvp = &cp->wdc_channel.ch_drive[drive]; 2227 /* If no drive, skip */ 2228 if ((drvp->drive_flags & DRIVE) == 0) 2229 continue; 2230 if ((drvp->drive_flags & DRIVE_DMA) == 0) 2231 continue; 2232 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 2233 /* Abort DMA setup */ 2234 printf("%s:%d:%d: cannot allocate DMA maps, " 2235 "using PIO transfers\n", 2236 sc->sc_wdcdev.sc_dev.dv_xname, 2237 channel, drive); 2238 drvp->drive_flags &= ~DRIVE_DMA; 2239 } 2240 printf("%s:%d:%d: using DMA data transfers\n", 2241 sc->sc_wdcdev.sc_dev.dv_xname, 2242 channel, drive); 2243 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2244 } 2245 if (idedma_ctl != 0) { 2246 /* Add software bits in status register */ 2247 PCIIDE_DMACTL_WRITE(sc, channel, idedma_ctl); 2248 } 2249 } 2250 } 2251 2252 void 2253 default_chip_unmap(struct pciide_softc *sc, int flags) 2254 { 2255 struct pciide_channel *cp; 2256 int channel; 2257 2258 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2259 cp = &sc->pciide_channels[channel]; 2260 pciide_unmap_chan(sc, cp, flags); 2261 pciide_chanfree(sc, channel); 2262 } 2263 2264 pciide_unmapreg_dma(sc); 2265 2266 if (sc->sc_cookie) 2267 free(sc->sc_cookie, M_DEVBUF); 2268 } 2269 2270 void 2271 sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2272 { 2273 struct pciide_channel *cp; 2274 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2275 int channel; 2276 bus_size_t cmdsize, ctlsize; 2277 2278 if (interface == 0) { 2279 WDCDEBUG_PRINT(("sata_chip_map interface == 0\n"), 2280 DEBUG_PROBE); 2281 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2282 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 2283 } 2284 2285 printf(": DMA"); 2286 pciide_mapreg_dma(sc, pa); 2287 printf("\n"); 2288 2289 if (sc->sc_dma_ok) { 2290 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2291 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2292 sc->sc_wdcdev.irqack = pciide_irqack; 2293 } 2294 sc->sc_wdcdev.PIO_cap = 4; 2295 sc->sc_wdcdev.DMA_cap = 2; 2296 sc->sc_wdcdev.UDMA_cap = 6; 2297 2298 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2299 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2300 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2301 
WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2302 sc->sc_wdcdev.set_modes = sata_setup_channel; 2303 2304 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2305 cp = &sc->pciide_channels[channel]; 2306 if (pciide_chansetup(sc, channel, interface) == 0) 2307 continue; 2308 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2309 pciide_pci_intr); 2310 sata_setup_channel(&cp->wdc_channel); 2311 } 2312 } 2313 2314 void 2315 sata_setup_channel(struct channel_softc *chp) 2316 { 2317 struct ata_drive_datas *drvp; 2318 int drive; 2319 u_int32_t idedma_ctl; 2320 struct pciide_channel *cp = (struct pciide_channel *)chp; 2321 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2322 2323 /* setup DMA if needed */ 2324 pciide_channel_dma_setup(cp); 2325 2326 idedma_ctl = 0; 2327 2328 for (drive = 0; drive < 2; drive++) { 2329 drvp = &chp->ch_drive[drive]; 2330 /* If no drive, skip */ 2331 if ((drvp->drive_flags & DRIVE) == 0) 2332 continue; 2333 if (drvp->drive_flags & DRIVE_UDMA) { 2334 /* use Ultra/DMA */ 2335 drvp->drive_flags &= ~DRIVE_DMA; 2336 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2337 } else if (drvp->drive_flags & DRIVE_DMA) { 2338 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2339 } 2340 } 2341 2342 /* 2343 * Nothing to do to setup modes; it is meaningless in S-ATA 2344 * (but many S-ATA drives still want to get the SET_FEATURE 2345 * command). 2346 */ 2347 if (idedma_ctl != 0) { 2348 /* Add software bits in status register */ 2349 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 2350 } 2351 pciide_print_modes(cp); 2352 } 2353 2354 void 2355 piix_timing_debug(struct pciide_softc *sc) 2356 { 2357 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 2358 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 2359 DEBUG_PROBE); 2360 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE && 2361 sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_ISA) { 2362 WDCDEBUG_PRINT((", sidetim=0x%x", 2363 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 2364 DEBUG_PROBE); 2365 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2366 WDCDEBUG_PRINT((", udmareg 0x%x", 2367 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 2368 DEBUG_PROBE); 2369 } 2370 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2371 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2372 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2373 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2374 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2375 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2376 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2377 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2378 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2379 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2380 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2381 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2382 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2383 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2384 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2385 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2386 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 2387 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 2388 DEBUG_PROBE); 2389 } 2390 } 2391 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 2392 } 2393 2394 void 2395 piix_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2396 { 2397 struct pciide_channel *cp; 2398 int 
channel; 2399 u_int32_t idetim; 2400 bus_size_t cmdsize, ctlsize; 2401 2402 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2403 2404 printf(": DMA"); 2405 pciide_mapreg_dma(sc, pa); 2406 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2407 WDC_CAPABILITY_MODE; 2408 if (sc->sc_dma_ok) { 2409 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2410 sc->sc_wdcdev.irqack = pciide_irqack; 2411 switch (sc->sc_pp->ide_product) { 2412 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2413 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2414 case PCI_PRODUCT_INTEL_82371AB_IDE: 2415 case PCI_PRODUCT_INTEL_82372FB_IDE: 2416 case PCI_PRODUCT_INTEL_82440MX_IDE: 2417 case PCI_PRODUCT_INTEL_82451NX: 2418 case PCI_PRODUCT_INTEL_82801AA_IDE: 2419 case PCI_PRODUCT_INTEL_82801AB_IDE: 2420 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2421 case PCI_PRODUCT_INTEL_82801BA_IDE: 2422 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2423 case PCI_PRODUCT_INTEL_82801CA_IDE: 2424 case PCI_PRODUCT_INTEL_82801DB_IDE: 2425 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2426 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2427 case PCI_PRODUCT_INTEL_82801EB_IDE: 2428 case PCI_PRODUCT_INTEL_82801FB_IDE: 2429 case PCI_PRODUCT_INTEL_82801GB_IDE: 2430 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2431 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2432 break; 2433 } 2434 } 2435 sc->sc_wdcdev.PIO_cap = 4; 2436 sc->sc_wdcdev.DMA_cap = 2; 2437 switch (sc->sc_pp->ide_product) { 2438 case PCI_PRODUCT_INTEL_82801AA_IDE: 2439 case PCI_PRODUCT_INTEL_82372FB_IDE: 2440 sc->sc_wdcdev.UDMA_cap = 4; 2441 break; 2442 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2443 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2444 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2445 case PCI_PRODUCT_INTEL_82801BA_IDE: 2446 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2447 case PCI_PRODUCT_INTEL_82801CA_IDE: 2448 case PCI_PRODUCT_INTEL_82801DB_IDE: 2449 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2450 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2451 case PCI_PRODUCT_INTEL_82801EB_IDE: 2452 case PCI_PRODUCT_INTEL_82801FB_IDE: 2453 case PCI_PRODUCT_INTEL_82801GB_IDE: 2454 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2455 sc->sc_wdcdev.UDMA_cap = 5; 2456 break; 2457 default: 2458 sc->sc_wdcdev.UDMA_cap = 2; 2459 break; 2460 } 2461 2462 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE || 2463 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_ISA) { 2464 sc->sc_wdcdev.set_modes = piix_setup_channel; 2465 } else { 2466 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 2467 } 2468 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2469 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2470 2471 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2472 2473 piix_timing_debug(sc); 2474 2475 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2476 cp = &sc->pciide_channels[channel]; 2477 2478 /* PIIX is compat-only */ 2479 if (pciide_chansetup(sc, channel, 0) == 0) 2480 continue; 2481 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2482 if ((PIIX_IDETIM_READ(idetim, channel) & 2483 PIIX_IDETIM_IDE) == 0) { 2484 printf("%s: %s ignored (disabled)\n", 2485 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2486 continue; 2487 } 2488 /* PIIX are compat-only pciide devices */ 2489 pciide_map_compat_intr(pa, cp, channel, 0); 2490 if (cp->hw_ok == 0) 2491 continue; 2492 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr); 2493 if (cp->hw_ok == 0) 2494 goto next; 2495 if (pciide_chan_candisable(cp)) { 2496 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 2497 channel); 2498 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 2499 
idetim); 2500 } 2501 if (cp->hw_ok == 0) 2502 goto next; 2503 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2504 next: 2505 if (cp->hw_ok == 0) 2506 pciide_unmap_compat_intr(pa, cp, channel, 0); 2507 } 2508 2509 piix_timing_debug(sc); 2510 } 2511 2512 void 2513 piixsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2514 { 2515 struct pciide_channel *cp; 2516 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2517 int channel; 2518 bus_size_t cmdsize, ctlsize; 2519 u_int8_t reg, ich = 0; 2520 2521 printf(": DMA"); 2522 pciide_mapreg_dma(sc, pa); 2523 2524 if (sc->sc_dma_ok) { 2525 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2526 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2527 sc->sc_wdcdev.irqack = pciide_irqack; 2528 sc->sc_wdcdev.DMA_cap = 2; 2529 sc->sc_wdcdev.UDMA_cap = 6; 2530 } 2531 sc->sc_wdcdev.PIO_cap = 4; 2532 2533 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2534 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2535 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2536 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2537 sc->sc_wdcdev.set_modes = sata_setup_channel; 2538 2539 switch(sc->sc_pp->ide_product) { 2540 case PCI_PRODUCT_INTEL_6300ESB_SATA: 2541 case PCI_PRODUCT_INTEL_6300ESB_SATA2: 2542 case PCI_PRODUCT_INTEL_82801EB_SATA: 2543 case PCI_PRODUCT_INTEL_82801ER_SATA: 2544 ich = 5; 2545 break; 2546 case PCI_PRODUCT_INTEL_82801FB_SATA: 2547 case PCI_PRODUCT_INTEL_82801FR_SATA: 2548 case PCI_PRODUCT_INTEL_82801FBM_SATA: 2549 ich = 6; 2550 break; 2551 default: 2552 ich = 7; 2553 break; 2554 } 2555 2556 /* 2557 * Put the SATA portion of controllers that don't operate in combined 2558 * mode into native PCI modes so the maximum number of devices can be 2559 * used. Intel calls this "enhanced mode" 2560 */ 2561 if (ich == 5) { 2562 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP); 2563 if ((reg & ICH5_SATA_MAP_COMBINED) == 0) { 2564 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2565 ICH5_SATA_PI); 2566 reg |= ICH5_SATA_PI_PRI_NATIVE | 2567 ICH5_SATA_PI_SEC_NATIVE; 2568 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2569 ICH5_SATA_PI, reg); 2570 interface |= PCIIDE_INTERFACE_PCI(0) | 2571 PCIIDE_INTERFACE_PCI(1); 2572 } 2573 } else { 2574 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP) & 2575 ICH6_SATA_MAP_CMB_MASK; 2576 if (reg != ICH6_SATA_MAP_CMB_PRI && 2577 reg != ICH6_SATA_MAP_CMB_SEC) { 2578 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2579 ICH5_SATA_PI); 2580 reg |= ICH5_SATA_PI_PRI_NATIVE | 2581 ICH5_SATA_PI_SEC_NATIVE; 2582 2583 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2584 ICH5_SATA_PI, reg); 2585 interface |= PCIIDE_INTERFACE_PCI(0) | 2586 PCIIDE_INTERFACE_PCI(1); 2587 2588 /* 2589 * Ask for SATA IDE Mode, we don't need to do this 2590 * for the combined mode case as combined mode is 2591 * only allowed in IDE Mode 2592 */ 2593 if (ich >= 7) { 2594 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 2595 ICH5_SATA_MAP) & ~ICH7_SATA_MAP_SMS_MASK; 2596 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2597 ICH5_SATA_MAP, reg); 2598 } 2599 } 2600 } 2601 2602 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2603 2604 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2605 cp = &sc->pciide_channels[channel]; 2606 if (pciide_chansetup(sc, channel, interface) == 0) 2607 continue; 2608 2609 pciide_map_compat_intr(pa, cp, channel, interface); 2610 if (cp->hw_ok == 0) 2611 continue; 2612 2613 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2614 pciide_pci_intr); 2615 if (cp->hw_ok != 0) 2616 
sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2617 2618 if (cp->hw_ok == 0) 2619 pciide_unmap_compat_intr(pa, cp, channel, interface); 2620 } 2621 } 2622 2623 void 2624 piix_setup_channel(struct channel_softc *chp) 2625 { 2626 u_int8_t mode[2], drive; 2627 u_int32_t oidetim, idetim, idedma_ctl; 2628 struct pciide_channel *cp = (struct pciide_channel *)chp; 2629 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2630 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 2631 2632 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2633 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 2634 idedma_ctl = 0; 2635 2636 /* set up new idetim: Enable IDE registers decode */ 2637 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 2638 chp->channel); 2639 2640 /* setup DMA */ 2641 pciide_channel_dma_setup(cp); 2642 2643 /* 2644 * Here we have to mess up with drives mode: PIIX can't have 2645 * different timings for master and slave drives. 2646 * We need to find the best combination. 2647 */ 2648 2649 /* If both drives supports DMA, take the lower mode */ 2650 if ((drvp[0].drive_flags & DRIVE_DMA) && 2651 (drvp[1].drive_flags & DRIVE_DMA)) { 2652 mode[0] = mode[1] = 2653 min(drvp[0].DMA_mode, drvp[1].DMA_mode); 2654 drvp[0].DMA_mode = mode[0]; 2655 drvp[1].DMA_mode = mode[1]; 2656 goto ok; 2657 } 2658 /* 2659 * If only one drive supports DMA, use its mode, and 2660 * put the other one in PIO mode 0 if mode not compatible 2661 */ 2662 if (drvp[0].drive_flags & DRIVE_DMA) { 2663 mode[0] = drvp[0].DMA_mode; 2664 mode[1] = drvp[1].PIO_mode; 2665 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] || 2666 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]]) 2667 mode[1] = drvp[1].PIO_mode = 0; 2668 goto ok; 2669 } 2670 if (drvp[1].drive_flags & DRIVE_DMA) { 2671 mode[1] = drvp[1].DMA_mode; 2672 mode[0] = drvp[0].PIO_mode; 2673 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] || 2674 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]]) 2675 mode[0] = drvp[0].PIO_mode = 0; 2676 goto ok; 2677 } 2678 /* 2679 * If both drives are not DMA, takes the lower mode, unless 2680 * one of them is PIO mode < 2 2681 */ 2682 if (drvp[0].PIO_mode < 2) { 2683 mode[0] = drvp[0].PIO_mode = 0; 2684 mode[1] = drvp[1].PIO_mode; 2685 } else if (drvp[1].PIO_mode < 2) { 2686 mode[1] = drvp[1].PIO_mode = 0; 2687 mode[0] = drvp[0].PIO_mode; 2688 } else { 2689 mode[0] = mode[1] = 2690 min(drvp[1].PIO_mode, drvp[0].PIO_mode); 2691 drvp[0].PIO_mode = mode[0]; 2692 drvp[1].PIO_mode = mode[1]; 2693 } 2694 ok: /* The modes are setup */ 2695 for (drive = 0; drive < 2; drive++) { 2696 if (drvp[drive].drive_flags & DRIVE_DMA) { 2697 idetim |= piix_setup_idetim_timings( 2698 mode[drive], 1, chp->channel); 2699 goto end; 2700 } 2701 } 2702 /* If we are there, none of the drives are DMA */ 2703 if (mode[0] >= 2) 2704 idetim |= piix_setup_idetim_timings( 2705 mode[0], 0, chp->channel); 2706 else 2707 idetim |= piix_setup_idetim_timings( 2708 mode[1], 0, chp->channel); 2709 end: /* 2710 * timing mode is now set up in the controller. 
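 * The ISP/RTC fields in IDETIM are shared by both drives on a channel,
 * which is why a single compromise mode was chosen above; the per-drive
 * DTE, PPE, IE and TIME bits from piix_setup_idetim_drvs() decide which
 * drives actually use that timing.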
Enable 2711 * it per-drive 2712 */ 2713 for (drive = 0; drive < 2; drive++) { 2714 /* If no drive, skip */ 2715 if ((drvp[drive].drive_flags & DRIVE) == 0) 2716 continue; 2717 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 2718 if (drvp[drive].drive_flags & DRIVE_DMA) 2719 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2720 } 2721 if (idedma_ctl != 0) { 2722 /* Add software bits in status register */ 2723 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2724 IDEDMA_CTL(chp->channel), 2725 idedma_ctl); 2726 } 2727 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2728 pciide_print_modes(cp); 2729 } 2730 2731 void 2732 piix3_4_setup_channel(struct channel_softc *chp) 2733 { 2734 struct ata_drive_datas *drvp; 2735 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 2736 struct pciide_channel *cp = (struct pciide_channel *)chp; 2737 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2738 int drive; 2739 int channel = chp->channel; 2740 2741 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2742 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 2743 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 2744 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 2745 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 2746 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 2747 PIIX_SIDETIM_RTC_MASK(channel)); 2748 2749 idedma_ctl = 0; 2750 /* If channel disabled, no need to go further */ 2751 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 2752 return; 2753 /* set up new idetim: Enable IDE registers decode */ 2754 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 2755 2756 /* setup DMA if needed */ 2757 pciide_channel_dma_setup(cp); 2758 2759 for (drive = 0; drive < 2; drive++) { 2760 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 2761 PIIX_UDMATIM_SET(0x3, channel, drive)); 2762 drvp = &chp->ch_drive[drive]; 2763 /* If no drive, skip */ 2764 if ((drvp->drive_flags & DRIVE) == 0) 2765 continue; 2766 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2767 (drvp->drive_flags & DRIVE_UDMA) == 0)) 2768 goto pio; 2769 2770 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2771 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2772 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2773 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2774 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2775 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2776 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2777 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2778 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2779 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2780 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2781 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2782 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2783 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2784 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2785 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2786 ideconf |= PIIX_CONFIG_PINGPONG; 2787 } 2788 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2789 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2790 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2791 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 2792 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 2793 
sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2794 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2795 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2796 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2797 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2798 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2799 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2800 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) { 2801 /* setup Ultra/100 */ 2802 if (drvp->UDMA_mode > 2 && 2803 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2804 drvp->UDMA_mode = 2; 2805 if (drvp->UDMA_mode > 4) { 2806 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 2807 } else { 2808 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 2809 if (drvp->UDMA_mode > 2) { 2810 ideconf |= PIIX_CONFIG_UDMA66(channel, 2811 drive); 2812 } else { 2813 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 2814 drive); 2815 } 2816 } 2817 } 2818 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2819 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2820 /* setup Ultra/66 */ 2821 if (drvp->UDMA_mode > 2 && 2822 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2823 drvp->UDMA_mode = 2; 2824 if (drvp->UDMA_mode > 2) 2825 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 2826 else 2827 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 2828 } 2829 2830 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2831 (drvp->drive_flags & DRIVE_UDMA)) { 2832 /* use Ultra/DMA */ 2833 drvp->drive_flags &= ~DRIVE_DMA; 2834 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 2835 udmareg |= PIIX_UDMATIM_SET( 2836 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 2837 } else { 2838 /* use Multiword DMA */ 2839 drvp->drive_flags &= ~DRIVE_UDMA; 2840 if (drive == 0) { 2841 idetim |= piix_setup_idetim_timings( 2842 drvp->DMA_mode, 1, channel); 2843 } else { 2844 sidetim |= piix_setup_sidetim_timings( 2845 drvp->DMA_mode, 1, channel); 2846 idetim = PIIX_IDETIM_SET(idetim, 2847 PIIX_IDETIM_SITRE, channel); 2848 } 2849 } 2850 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2851 2852 pio: /* use PIO mode */ 2853 idetim |= piix_setup_idetim_drvs(drvp); 2854 if (drive == 0) { 2855 idetim |= piix_setup_idetim_timings( 2856 drvp->PIO_mode, 0, channel); 2857 } else { 2858 sidetim |= piix_setup_sidetim_timings( 2859 drvp->PIO_mode, 0, channel); 2860 idetim = PIIX_IDETIM_SET(idetim, 2861 PIIX_IDETIM_SITRE, channel); 2862 } 2863 } 2864 if (idedma_ctl != 0) { 2865 /* Add software bits in status register */ 2866 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2867 IDEDMA_CTL(channel), 2868 idedma_ctl); 2869 } 2870 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2871 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 2872 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 2873 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 2874 pciide_print_modes(cp); 2875 } 2876 2877 2878 /* setup ISP and RTC fields, based on mode */ 2879 u_int32_t 2880 piix_setup_idetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 2881 { 2882 2883 if (dma) 2884 return (PIIX_IDETIM_SET(0, 2885 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 2886 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 2887 channel)); 2888 else 2889 return (PIIX_IDETIM_SET(0, 2890 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 2891 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 2892 channel)); 2893 } 2894 2895 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 2896 u_int32_t 2897 piix_setup_idetim_drvs(struct 
ata_drive_datas *drvp) 2898 { 2899 u_int32_t ret = 0; 2900 struct channel_softc *chp = drvp->chnl_softc; 2901 u_int8_t channel = chp->channel; 2902 u_int8_t drive = drvp->drive; 2903 2904 /* 2905 * If drive is using UDMA, timings setups are independant 2906 * So just check DMA and PIO here. 2907 */ 2908 if (drvp->drive_flags & DRIVE_DMA) { 2909 /* if mode = DMA mode 0, use compatible timings */ 2910 if ((drvp->drive_flags & DRIVE_DMA) && 2911 drvp->DMA_mode == 0) { 2912 drvp->PIO_mode = 0; 2913 return (ret); 2914 } 2915 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 2916 /* 2917 * PIO and DMA timings are the same, use fast timings for PIO 2918 * too, else use compat timings. 2919 */ 2920 if ((piix_isp_pio[drvp->PIO_mode] != 2921 piix_isp_dma[drvp->DMA_mode]) || 2922 (piix_rtc_pio[drvp->PIO_mode] != 2923 piix_rtc_dma[drvp->DMA_mode])) 2924 drvp->PIO_mode = 0; 2925 /* if PIO mode <= 2, use compat timings for PIO */ 2926 if (drvp->PIO_mode <= 2) { 2927 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 2928 channel); 2929 return (ret); 2930 } 2931 } 2932 2933 /* 2934 * Now setup PIO modes. If mode < 2, use compat timings. 2935 * Else enable fast timings. Enable IORDY and prefetch/post 2936 * if PIO mode >= 3. 2937 */ 2938 2939 if (drvp->PIO_mode < 2) 2940 return (ret); 2941 2942 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 2943 if (drvp->PIO_mode >= 3) { 2944 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 2945 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 2946 } 2947 return (ret); 2948 } 2949 2950 /* setup values in SIDETIM registers, based on mode */ 2951 u_int32_t 2952 piix_setup_sidetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 2953 { 2954 if (dma) 2955 return (PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 2956 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel)); 2957 else 2958 return (PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 2959 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel)); 2960 } 2961 2962 void 2963 amd756_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2964 { 2965 struct pciide_channel *cp; 2966 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2967 int channel; 2968 pcireg_t chanenable; 2969 bus_size_t cmdsize, ctlsize; 2970 2971 printf(": DMA"); 2972 pciide_mapreg_dma(sc, pa); 2973 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2974 WDC_CAPABILITY_MODE; 2975 if (sc->sc_dma_ok) { 2976 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2977 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2978 sc->sc_wdcdev.irqack = pciide_irqack; 2979 } 2980 sc->sc_wdcdev.PIO_cap = 4; 2981 sc->sc_wdcdev.DMA_cap = 2; 2982 switch (sc->sc_pp->ide_product) { 2983 case PCI_PRODUCT_AMD_8111_IDE: 2984 sc->sc_wdcdev.UDMA_cap = 6; 2985 break; 2986 case PCI_PRODUCT_AMD_766_IDE: 2987 case PCI_PRODUCT_AMD_PBC768_IDE: 2988 sc->sc_wdcdev.UDMA_cap = 5; 2989 break; 2990 default: 2991 sc->sc_wdcdev.UDMA_cap = 4; 2992 break; 2993 } 2994 sc->sc_wdcdev.set_modes = amd756_setup_channel; 2995 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2996 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2997 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 2998 2999 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3000 3001 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3002 cp = &sc->pciide_channels[channel]; 3003 if (pciide_chansetup(sc, channel, interface) == 0) 3004 continue; 3005 3006 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 3007 
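			/* channel switched off in AMD756_CHANSTATUS_EN; report it and move on */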
printf("%s: %s ignored (disabled)\n", 3008 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3009 continue; 3010 } 3011 pciide_map_compat_intr(pa, cp, channel, interface); 3012 if (cp->hw_ok == 0) 3013 continue; 3014 3015 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3016 pciide_pci_intr); 3017 3018 if (pciide_chan_candisable(cp)) { 3019 chanenable &= ~AMD756_CHAN_EN(channel); 3020 } 3021 if (cp->hw_ok == 0) { 3022 pciide_unmap_compat_intr(pa, cp, channel, interface); 3023 continue; 3024 } 3025 3026 amd756_setup_channel(&cp->wdc_channel); 3027 } 3028 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 3029 chanenable); 3030 return; 3031 } 3032 3033 void 3034 amd756_setup_channel(struct channel_softc *chp) 3035 { 3036 u_int32_t udmatim_reg, datatim_reg; 3037 u_int8_t idedma_ctl; 3038 int mode, drive; 3039 struct ata_drive_datas *drvp; 3040 struct pciide_channel *cp = (struct pciide_channel *)chp; 3041 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3042 pcireg_t chanenable; 3043 #ifndef PCIIDE_AMD756_ENABLEDMA 3044 int product = sc->sc_pp->ide_product; 3045 int rev = sc->sc_rev; 3046 #endif 3047 3048 idedma_ctl = 0; 3049 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 3050 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 3051 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 3052 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 3053 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 3054 AMD756_CHANSTATUS_EN); 3055 3056 /* setup DMA if needed */ 3057 pciide_channel_dma_setup(cp); 3058 3059 for (drive = 0; drive < 2; drive++) { 3060 drvp = &chp->ch_drive[drive]; 3061 /* If no drive, skip */ 3062 if ((drvp->drive_flags & DRIVE) == 0) 3063 continue; 3064 /* add timing values, setup DMA if needed */ 3065 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3066 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3067 mode = drvp->PIO_mode; 3068 goto pio; 3069 } 3070 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3071 (drvp->drive_flags & DRIVE_UDMA)) { 3072 /* use Ultra/DMA */ 3073 drvp->drive_flags &= ~DRIVE_DMA; 3074 3075 /* Check cable */ 3076 if ((chanenable & AMD756_CABLE(chp->channel, 3077 drive)) == 0 && drvp->UDMA_mode > 2) { 3078 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 3079 "cable not detected\n", drvp->drive_name, 3080 sc->sc_wdcdev.sc_dev.dv_xname, 3081 chp->channel, drive), DEBUG_PROBE); 3082 drvp->UDMA_mode = 2; 3083 } 3084 3085 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 3086 AMD756_UDMA_EN_MTH(chp->channel, drive) | 3087 AMD756_UDMA_TIME(chp->channel, drive, 3088 amd756_udma_tim[drvp->UDMA_mode]); 3089 /* can use PIO timings, MW DMA unused */ 3090 mode = drvp->PIO_mode; 3091 } else { 3092 /* use Multiword DMA, but only if revision is OK */ 3093 drvp->drive_flags &= ~DRIVE_UDMA; 3094 #ifndef PCIIDE_AMD756_ENABLEDMA 3095 /* 3096 * The workaround doesn't seem to be necessary 3097 * with all drives, so it can be disabled by 3098 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 3099 * triggered. 
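 * On revisions where AMD756_CHIPREV_DISABLEDMA() applies, the check
 * below reports the problem and falls back to PIO for the drive rather
 * than risking the hang.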
3100 */ 3101 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 3102 printf("%s:%d:%d: multi-word DMA disabled due " 3103 "to chip revision\n", 3104 sc->sc_wdcdev.sc_dev.dv_xname, 3105 chp->channel, drive); 3106 mode = drvp->PIO_mode; 3107 drvp->drive_flags &= ~DRIVE_DMA; 3108 goto pio; 3109 } 3110 #endif 3111 /* mode = min(pio, dma+2) */ 3112 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3113 mode = drvp->PIO_mode; 3114 else 3115 mode = drvp->DMA_mode + 2; 3116 } 3117 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3118 3119 pio: /* setup PIO mode */ 3120 if (mode <= 2) { 3121 drvp->DMA_mode = 0; 3122 drvp->PIO_mode = 0; 3123 mode = 0; 3124 } else { 3125 drvp->PIO_mode = mode; 3126 drvp->DMA_mode = mode - 2; 3127 } 3128 datatim_reg |= 3129 AMD756_DATATIM_PULSE(chp->channel, drive, 3130 amd756_pio_set[mode]) | 3131 AMD756_DATATIM_RECOV(chp->channel, drive, 3132 amd756_pio_rec[mode]); 3133 } 3134 if (idedma_ctl != 0) { 3135 /* Add software bits in status register */ 3136 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3137 IDEDMA_CTL(chp->channel), 3138 idedma_ctl); 3139 } 3140 pciide_print_modes(cp); 3141 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 3142 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 3143 } 3144 3145 void 3146 apollo_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3147 { 3148 struct pciide_channel *cp; 3149 pcireg_t interface; 3150 int channel; 3151 u_int32_t ideconf; 3152 bus_size_t cmdsize, ctlsize; 3153 pcitag_t tag; 3154 pcireg_t id, class; 3155 3156 /* 3157 * Fake interface since VT6410 is claimed to be a ``RAID'' device. 3158 */ 3159 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3160 interface = PCI_INTERFACE(pa->pa_class); 3161 } else { 3162 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3163 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3164 } 3165 3166 if ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT6410) || 3167 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_CX700_IDE) || 3168 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VX700_IDE) || 3169 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VX855_IDE)) { 3170 printf(": ATA133"); 3171 sc->sc_wdcdev.UDMA_cap = 6; 3172 } else { 3173 /* 3174 * Determine the DMA capabilities by looking at the 3175 * ISA bridge. 3176 */ 3177 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 3178 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3179 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3180 3181 /* 3182 * XXX On the VT8237, the ISA bridge is on a different 3183 * device. 
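 * If what sits at function 0 of our own device is not a bridge and we
 * are device 15, retry the lookup at device 17, where the VT8237 keeps
 * its ISA bridge.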
3184 */ 3185 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE && 3186 pa->pa_device == 15) { 3187 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 17, 0); 3188 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3189 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3190 } 3191 3192 switch (PCI_PRODUCT(id)) { 3193 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 3194 if (PCI_REVISION(class) >= 0x02) { 3195 printf(": ATA33"); 3196 sc->sc_wdcdev.UDMA_cap = 2; 3197 } else { 3198 printf(": DMA"); 3199 sc->sc_wdcdev.UDMA_cap = 0; 3200 } 3201 break; 3202 case PCI_PRODUCT_VIATECH_VT82C596A: 3203 if (PCI_REVISION(class) >= 0x12) { 3204 printf(": ATA66"); 3205 sc->sc_wdcdev.UDMA_cap = 4; 3206 } else { 3207 printf(": ATA33"); 3208 sc->sc_wdcdev.UDMA_cap = 2; 3209 } 3210 break; 3211 3212 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 3213 if (PCI_REVISION(class) >= 0x40) { 3214 printf(": ATA100"); 3215 sc->sc_wdcdev.UDMA_cap = 5; 3216 } else { 3217 printf(": ATA66"); 3218 sc->sc_wdcdev.UDMA_cap = 4; 3219 } 3220 break; 3221 case PCI_PRODUCT_VIATECH_VT8231_ISA: 3222 case PCI_PRODUCT_VIATECH_VT8233_ISA: 3223 printf(": ATA100"); 3224 sc->sc_wdcdev.UDMA_cap = 5; 3225 break; 3226 case PCI_PRODUCT_VIATECH_VT8233A_ISA: 3227 case PCI_PRODUCT_VIATECH_VT8235_ISA: 3228 case PCI_PRODUCT_VIATECH_VT8237_ISA: 3229 printf(": ATA133"); 3230 sc->sc_wdcdev.UDMA_cap = 6; 3231 break; 3232 default: 3233 printf(": DMA"); 3234 sc->sc_wdcdev.UDMA_cap = 0; 3235 break; 3236 } 3237 } 3238 3239 pciide_mapreg_dma(sc, pa); 3240 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3241 WDC_CAPABILITY_MODE; 3242 if (sc->sc_dma_ok) { 3243 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3244 sc->sc_wdcdev.irqack = pciide_irqack; 3245 if (sc->sc_wdcdev.UDMA_cap > 0) 3246 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3247 } 3248 sc->sc_wdcdev.PIO_cap = 4; 3249 sc->sc_wdcdev.DMA_cap = 2; 3250 sc->sc_wdcdev.set_modes = apollo_setup_channel; 3251 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3252 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3253 3254 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3255 3256 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 3257 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3258 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 3259 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 3260 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3261 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 3262 DEBUG_PROBE); 3263 3264 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3265 cp = &sc->pciide_channels[channel]; 3266 if (pciide_chansetup(sc, channel, interface) == 0) 3267 continue; 3268 3269 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 3270 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 3271 printf("%s: %s ignored (disabled)\n", 3272 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3273 continue; 3274 } 3275 pciide_map_compat_intr(pa, cp, channel, interface); 3276 if (cp->hw_ok == 0) 3277 continue; 3278 3279 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3280 pciide_pci_intr); 3281 if (cp->hw_ok == 0) { 3282 goto next; 3283 } 3284 if (pciide_chan_candisable(cp)) { 3285 ideconf &= ~APO_IDECONF_EN(channel); 3286 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 3287 ideconf); 3288 } 3289 3290 if (cp->hw_ok == 0) 3291 goto next; 3292 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 3293 next: 3294 if (cp->hw_ok == 0) 3295 pciide_unmap_compat_intr(pa, cp, channel, interface); 3296 } 3297 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, 
APO_UDMA=0x%x\n", 3298 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3299 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 3300 } 3301 3302 void 3303 apollo_setup_channel(struct channel_softc *chp) 3304 { 3305 u_int32_t udmatim_reg, datatim_reg; 3306 u_int8_t idedma_ctl; 3307 int mode, drive; 3308 struct ata_drive_datas *drvp; 3309 struct pciide_channel *cp = (struct pciide_channel *)chp; 3310 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3311 3312 idedma_ctl = 0; 3313 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 3314 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 3315 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 3316 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 3317 3318 /* setup DMA if needed */ 3319 pciide_channel_dma_setup(cp); 3320 3321 /* 3322 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 3323 * downgrade to Ultra/33 if needed 3324 */ 3325 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3326 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 3327 /* both drives UDMA */ 3328 if (chp->ch_drive[0].UDMA_mode > 2 && 3329 chp->ch_drive[1].UDMA_mode <= 2) { 3330 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 3331 chp->ch_drive[0].UDMA_mode = 2; 3332 } else if (chp->ch_drive[1].UDMA_mode > 2 && 3333 chp->ch_drive[0].UDMA_mode <= 2) { 3334 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 3335 chp->ch_drive[1].UDMA_mode = 2; 3336 } 3337 } 3338 3339 for (drive = 0; drive < 2; drive++) { 3340 drvp = &chp->ch_drive[drive]; 3341 /* If no drive, skip */ 3342 if ((drvp->drive_flags & DRIVE) == 0) 3343 continue; 3344 /* add timing values, setup DMA if needed */ 3345 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3346 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3347 mode = drvp->PIO_mode; 3348 goto pio; 3349 } 3350 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3351 (drvp->drive_flags & DRIVE_UDMA)) { 3352 /* use Ultra/DMA */ 3353 drvp->drive_flags &= ~DRIVE_DMA; 3354 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 3355 APO_UDMA_EN_MTH(chp->channel, drive); 3356 if (sc->sc_wdcdev.UDMA_cap == 6) { 3357 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3358 drive, apollo_udma133_tim[drvp->UDMA_mode]); 3359 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 3360 /* 686b */ 3361 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3362 drive, apollo_udma100_tim[drvp->UDMA_mode]); 3363 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 3364 /* 596b or 686a */ 3365 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 3366 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3367 drive, apollo_udma66_tim[drvp->UDMA_mode]); 3368 } else { 3369 /* 596a or 586b */ 3370 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3371 drive, apollo_udma33_tim[drvp->UDMA_mode]); 3372 } 3373 /* can use PIO timings, MW DMA unused */ 3374 mode = drvp->PIO_mode; 3375 } else { 3376 /* use Multiword DMA */ 3377 drvp->drive_flags &= ~DRIVE_UDMA; 3378 /* mode = min(pio, dma+2) */ 3379 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3380 mode = drvp->PIO_mode; 3381 else 3382 mode = drvp->DMA_mode + 2; 3383 } 3384 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3385 3386 pio: /* setup PIO mode */ 3387 if (mode <= 2) { 3388 drvp->DMA_mode = 0; 3389 drvp->PIO_mode = 0; 3390 mode = 0; 3391 } else { 3392 drvp->PIO_mode = mode; 3393 drvp->DMA_mode = mode - 2; 3394 } 3395 datatim_reg |= 3396 APO_DATATIM_PULSE(chp->channel, drive, 3397 apollo_pio_set[mode]) | 3398 APO_DATATIM_RECOV(chp->channel, drive, 3399 apollo_pio_rec[mode]); 3400 } 3401 if (idedma_ctl != 0) { 3402 /* Add software bits in status register */ 3403 
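		/* the IDEDMA_CTL_DRV_DMA flags mark which drives were set up for DMA */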
bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3404 IDEDMA_CTL(chp->channel), 3405 idedma_ctl); 3406 } 3407 pciide_print_modes(cp); 3408 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 3409 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 3410 } 3411 3412 void 3413 cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3414 int channel) 3415 { 3416 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3417 bus_size_t cmdsize, ctlsize; 3418 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 3419 pcireg_t interface; 3420 int one_channel; 3421 3422 /* 3423 * The 0648/0649 can be told to identify as a RAID controller. 3424 * In this case, we have to fake interface 3425 */ 3426 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3427 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3428 PCIIDE_INTERFACE_SETTABLE(1); 3429 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3430 CMD_CONF_DSA1) 3431 interface |= PCIIDE_INTERFACE_PCI(0) | 3432 PCIIDE_INTERFACE_PCI(1); 3433 } else { 3434 interface = PCI_INTERFACE(pa->pa_class); 3435 } 3436 3437 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3438 cp->name = PCIIDE_CHANNEL_NAME(channel); 3439 cp->wdc_channel.channel = channel; 3440 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3441 3442 /* 3443 * Older CMD64X doesn't have independant channels 3444 */ 3445 switch (sc->sc_pp->ide_product) { 3446 case PCI_PRODUCT_CMDTECH_649: 3447 one_channel = 0; 3448 break; 3449 default: 3450 one_channel = 1; 3451 break; 3452 } 3453 3454 if (channel > 0 && one_channel) { 3455 cp->wdc_channel.ch_queue = 3456 sc->pciide_channels[0].wdc_channel.ch_queue; 3457 } else { 3458 cp->wdc_channel.ch_queue = 3459 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 3460 } 3461 if (cp->wdc_channel.ch_queue == NULL) { 3462 printf( 3463 "%s: %s cannot allocate memory for command queue", 3464 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3465 return; 3466 } 3467 3468 /* 3469 * with a CMD PCI64x, if we get here, the first channel is enabled: 3470 * there's no way to disable the first channel without disabling 3471 * the whole device 3472 */ 3473 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 3474 printf("%s: %s ignored (disabled)\n", 3475 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3476 return; 3477 } 3478 cp->hw_ok = 1; 3479 pciide_map_compat_intr(pa, cp, channel, interface); 3480 if (cp->hw_ok == 0) 3481 return; 3482 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 3483 if (cp->hw_ok == 0) { 3484 pciide_unmap_compat_intr(pa, cp, channel, interface); 3485 return; 3486 } 3487 if (pciide_chan_candisable(cp)) { 3488 if (channel == 1) { 3489 ctrl &= ~CMD_CTRL_2PORT; 3490 pciide_pci_write(pa->pa_pc, pa->pa_tag, 3491 CMD_CTRL, ctrl); 3492 pciide_unmap_compat_intr(pa, cp, channel, interface); 3493 } 3494 } 3495 } 3496 3497 int 3498 cmd_pci_intr(void *arg) 3499 { 3500 struct pciide_softc *sc = arg; 3501 struct pciide_channel *cp; 3502 struct channel_softc *wdc_cp; 3503 int i, rv, crv; 3504 u_int32_t priirq, secirq; 3505 3506 rv = 0; 3507 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3508 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3509 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3510 cp = &sc->pciide_channels[i]; 3511 wdc_cp = &cp->wdc_channel; 3512 /* If a compat channel skip. 
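 * (its interrupts arrive on the legacy compatibility interrupt installed
 * by pciide_map_compat_intr(), not on this shared PCI interrupt)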
*/ 3513 if (cp->compat) 3514 continue; 3515 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 3516 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 3517 crv = wdcintr(wdc_cp); 3518 if (crv == 0) { 3519 #if 0 3520 printf("%s:%d: bogus intr\n", 3521 sc->sc_wdcdev.sc_dev.dv_xname, i); 3522 #endif 3523 } else 3524 rv = 1; 3525 } 3526 } 3527 return (rv); 3528 } 3529 3530 void 3531 cmd_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3532 { 3533 int channel; 3534 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3535 3536 printf(": no DMA"); 3537 sc->sc_dma_ok = 0; 3538 3539 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3540 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3541 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3542 3543 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3544 3545 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3546 cmd_channel_map(pa, sc, channel); 3547 } 3548 } 3549 3550 void 3551 cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3552 { 3553 struct pciide_channel *cp; 3554 int channel; 3555 int rev = sc->sc_rev; 3556 pcireg_t interface; 3557 3558 /* 3559 * The 0648/0649 can be told to identify as a RAID controller. 3560 * In this case, we have to fake interface 3561 */ 3562 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3563 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3564 PCIIDE_INTERFACE_SETTABLE(1); 3565 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3566 CMD_CONF_DSA1) 3567 interface |= PCIIDE_INTERFACE_PCI(0) | 3568 PCIIDE_INTERFACE_PCI(1); 3569 } else { 3570 interface = PCI_INTERFACE(pa->pa_class); 3571 } 3572 3573 printf(": DMA"); 3574 pciide_mapreg_dma(sc, pa); 3575 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3576 WDC_CAPABILITY_MODE; 3577 if (sc->sc_dma_ok) { 3578 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3579 switch (sc->sc_pp->ide_product) { 3580 case PCI_PRODUCT_CMDTECH_649: 3581 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3582 sc->sc_wdcdev.UDMA_cap = 5; 3583 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3584 break; 3585 case PCI_PRODUCT_CMDTECH_648: 3586 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3587 sc->sc_wdcdev.UDMA_cap = 4; 3588 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3589 break; 3590 case PCI_PRODUCT_CMDTECH_646: 3591 if (rev >= CMD0646U2_REV) { 3592 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3593 sc->sc_wdcdev.UDMA_cap = 2; 3594 } else if (rev >= CMD0646U_REV) { 3595 /* 3596 * Linux's driver claims that the 646U is broken 3597 * with UDMA. 
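 * (UDMA stays off here unless the kernel is built with the
 * PCIIDE_CMD0646U_ENABLEUDMA option; the UDMA timing registers are
 * cleared just below in any case.)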
Only enable it if we know what we're 3598 * doing 3599 */ 3600 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 3601 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3602 sc->sc_wdcdev.UDMA_cap = 2; 3603 #endif 3604 /* explicitly disable UDMA */ 3605 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3606 CMD_UDMATIM(0), 0); 3607 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3608 CMD_UDMATIM(1), 0); 3609 } 3610 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3611 break; 3612 default: 3613 sc->sc_wdcdev.irqack = pciide_irqack; 3614 } 3615 } 3616 3617 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3618 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3619 sc->sc_wdcdev.PIO_cap = 4; 3620 sc->sc_wdcdev.DMA_cap = 2; 3621 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 3622 3623 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3624 3625 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 3626 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3627 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3628 DEBUG_PROBE); 3629 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3630 cp = &sc->pciide_channels[channel]; 3631 cmd_channel_map(pa, sc, channel); 3632 if (cp->hw_ok == 0) 3633 continue; 3634 cmd0643_9_setup_channel(&cp->wdc_channel); 3635 } 3636 /* 3637 * note - this also makes sure we clear the irq disable and reset 3638 * bits 3639 */ 3640 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 3641 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 3642 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3643 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3644 DEBUG_PROBE); 3645 } 3646 3647 void 3648 cmd0643_9_setup_channel(struct channel_softc *chp) 3649 { 3650 struct ata_drive_datas *drvp; 3651 u_int8_t tim; 3652 u_int32_t idedma_ctl, udma_reg; 3653 int drive; 3654 struct pciide_channel *cp = (struct pciide_channel *)chp; 3655 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3656 3657 idedma_ctl = 0; 3658 /* setup DMA if needed */ 3659 pciide_channel_dma_setup(cp); 3660 3661 for (drive = 0; drive < 2; drive++) { 3662 drvp = &chp->ch_drive[drive]; 3663 /* If no drive, skip */ 3664 if ((drvp->drive_flags & DRIVE) == 0) 3665 continue; 3666 /* add timing values, setup DMA if needed */ 3667 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 3668 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 3669 if (drvp->drive_flags & DRIVE_UDMA) { 3670 /* UltraDMA on a 646U2, 0648 or 0649 */ 3671 drvp->drive_flags &= ~DRIVE_DMA; 3672 udma_reg = pciide_pci_read(sc->sc_pc, 3673 sc->sc_tag, CMD_UDMATIM(chp->channel)); 3674 if (drvp->UDMA_mode > 2 && 3675 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3676 CMD_BICSR) & 3677 CMD_BICSR_80(chp->channel)) == 0) { 3678 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 3679 "80-wire cable not detected\n", 3680 drvp->drive_name, 3681 sc->sc_wdcdev.sc_dev.dv_xname, 3682 chp->channel, drive), DEBUG_PROBE); 3683 drvp->UDMA_mode = 2; 3684 } 3685 if (drvp->UDMA_mode > 2) 3686 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 3687 else if (sc->sc_wdcdev.UDMA_cap > 2) 3688 udma_reg |= CMD_UDMATIM_UDMA33(drive); 3689 udma_reg |= CMD_UDMATIM_UDMA(drive); 3690 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 3691 CMD_UDMATIM_TIM_OFF(drive)); 3692 udma_reg |= 3693 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 3694 CMD_UDMATIM_TIM_OFF(drive)); 3695 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3696 CMD_UDMATIM(chp->channel), udma_reg); 3697 } else { 3698 /* 3699 * use Multiword DMA. 
3700 * Timings will be used for both PIO and DMA, 3701 * so adjust DMA mode if needed 3702 * if we have a 0646U2/8/9, turn off UDMA 3703 */ 3704 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 3705 udma_reg = pciide_pci_read(sc->sc_pc, 3706 sc->sc_tag, 3707 CMD_UDMATIM(chp->channel)); 3708 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 3709 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3710 CMD_UDMATIM(chp->channel), 3711 udma_reg); 3712 } 3713 if (drvp->PIO_mode >= 3 && 3714 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3715 drvp->DMA_mode = drvp->PIO_mode - 2; 3716 } 3717 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 3718 } 3719 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3720 } 3721 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3722 CMD_DATA_TIM(chp->channel, drive), tim); 3723 } 3724 if (idedma_ctl != 0) { 3725 /* Add software bits in status register */ 3726 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3727 IDEDMA_CTL(chp->channel), 3728 idedma_ctl); 3729 } 3730 pciide_print_modes(cp); 3731 #ifdef __sparc64__ 3732 /* 3733 * The Ultra 5 has a tendency to hang during reboot. This is due 3734 * to the PCI0646U asserting a PCI interrupt line when the chip 3735 * registers claim that it is not. Performing a reset at this 3736 * point appears to eliminate the symptoms. It is likely the 3737 * real cause is still lurking somewhere in the code. 3738 */ 3739 wdcreset(chp, SILENT); 3740 #endif /* __sparc64__ */ 3741 } 3742 3743 void 3744 cmd646_9_irqack(struct channel_softc *chp) 3745 { 3746 u_int32_t priirq, secirq; 3747 struct pciide_channel *cp = (struct pciide_channel *)chp; 3748 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3749 3750 if (chp->channel == 0) { 3751 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3752 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 3753 } else { 3754 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3755 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 3756 } 3757 pciide_irqack(chp); 3758 } 3759 3760 void 3761 cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3762 { 3763 struct pciide_channel *cp; 3764 int channel; 3765 3766 printf("\n%s: bus-master DMA support present", 3767 sc->sc_wdcdev.sc_dev.dv_xname); 3768 pciide_mapreg_dma(sc, pa); 3769 printf("\n"); 3770 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3771 WDC_CAPABILITY_MODE; 3772 if (sc->sc_dma_ok) { 3773 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3774 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3775 sc->sc_wdcdev.UDMA_cap = 6; 3776 sc->sc_wdcdev.irqack = pciide_irqack; 3777 } 3778 3779 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3780 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3781 sc->sc_wdcdev.PIO_cap = 4; 3782 sc->sc_wdcdev.DMA_cap = 2; 3783 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 3784 3785 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 3786 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 3787 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 3788 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 3789 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3790 cp = &sc->pciide_channels[channel]; 3791 cmd680_channel_map(pa, sc, channel); 3792 if (cp->hw_ok == 0) 3793 continue; 3794 cmd680_setup_channel(&cp->wdc_channel); 3795 } 3796 } 3797 3798 void 3799 cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3800 int channel) 3801 { 3802 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3803 bus_size_t cmdsize, ctlsize; 3804 int 
interface, i, reg; 3805 static const u_int8_t init_val[] = 3806 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 3807 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 3808 3809 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3810 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3811 PCIIDE_INTERFACE_SETTABLE(1); 3812 interface |= PCIIDE_INTERFACE_PCI(0) | 3813 PCIIDE_INTERFACE_PCI(1); 3814 } else { 3815 interface = PCI_INTERFACE(pa->pa_class); 3816 } 3817 3818 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3819 cp->name = PCIIDE_CHANNEL_NAME(channel); 3820 cp->wdc_channel.channel = channel; 3821 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3822 3823 cp->wdc_channel.ch_queue = 3824 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 3825 if (cp->wdc_channel.ch_queue == NULL) { 3826 printf("%s %s: " 3827 "can't allocate memory for command queue", 3828 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3829 return; 3830 } 3831 3832 /* XXX */ 3833 reg = 0xa2 + channel * 16; 3834 for (i = 0; i < sizeof(init_val); i++) 3835 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 3836 3837 printf("%s: %s %s to %s mode\n", 3838 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 3839 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 3840 "configured" : "wired", 3841 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 3842 "native-PCI" : "compatibility"); 3843 3844 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 3845 if (cp->hw_ok == 0) 3846 return; 3847 pciide_map_compat_intr(pa, cp, channel, interface); 3848 } 3849 3850 void 3851 cmd680_setup_channel(struct channel_softc *chp) 3852 { 3853 struct ata_drive_datas *drvp; 3854 u_int8_t mode, off, scsc; 3855 u_int16_t val; 3856 u_int32_t idedma_ctl; 3857 int drive; 3858 struct pciide_channel *cp = (struct pciide_channel *)chp; 3859 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3860 pci_chipset_tag_t pc = sc->sc_pc; 3861 pcitag_t pa = sc->sc_tag; 3862 static const u_int8_t udma2_tbl[] = 3863 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 3864 static const u_int8_t udma_tbl[] = 3865 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 3866 static const u_int16_t dma_tbl[] = 3867 { 0x2208, 0x10c2, 0x10c1 }; 3868 static const u_int16_t pio_tbl[] = 3869 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 3870 3871 idedma_ctl = 0; 3872 pciide_channel_dma_setup(cp); 3873 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 3874 3875 for (drive = 0; drive < 2; drive++) { 3876 drvp = &chp->ch_drive[drive]; 3877 /* If no drive, skip */ 3878 if ((drvp->drive_flags & DRIVE) == 0) 3879 continue; 3880 mode &= ~(0x03 << (drive * 4)); 3881 if (drvp->drive_flags & DRIVE_UDMA) { 3882 drvp->drive_flags &= ~DRIVE_DMA; 3883 off = 0xa0 + chp->channel * 16; 3884 if (drvp->UDMA_mode > 2 && 3885 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 3886 drvp->UDMA_mode = 2; 3887 scsc = pciide_pci_read(pc, pa, 0x8a); 3888 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 3889 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 3890 scsc = pciide_pci_read(pc, pa, 0x8a); 3891 if ((scsc & 0x30) == 0) 3892 drvp->UDMA_mode = 5; 3893 } 3894 mode |= 0x03 << (drive * 4); 3895 off = 0xac + chp->channel * 16 + drive * 2; 3896 val = pciide_pci_read(pc, pa, off) & ~0x3f; 3897 if (scsc & 0x30) 3898 val |= udma2_tbl[drvp->UDMA_mode]; 3899 else 3900 val |= udma_tbl[drvp->UDMA_mode]; 3901 pciide_pci_write(pc, pa, off, val); 3902 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3903 } else if (drvp->drive_flags & DRIVE_DMA) { 3904 mode |= 0x02 << (drive * 4); 3905 off = 0xa8 + chp->channel * 16 + 
drive * 2; 3906 val = dma_tbl[drvp->DMA_mode]; 3907 pciide_pci_write(pc, pa, off, val & 0xff); 3908 pciide_pci_write(pc, pa, off, val >> 8); 3909 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3910 } else { 3911 mode |= 0x01 << (drive * 4); 3912 off = 0xa4 + chp->channel * 16 + drive * 2; 3913 val = pio_tbl[drvp->PIO_mode]; 3914 pciide_pci_write(pc, pa, off, val & 0xff); 3915 pciide_pci_write(pc, pa, off, val >> 8); 3916 } 3917 } 3918 3919 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode); 3920 if (idedma_ctl != 0) { 3921 /* Add software bits in status register */ 3922 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3923 IDEDMA_CTL(chp->channel), 3924 idedma_ctl); 3925 } 3926 pciide_print_modes(cp); 3927 } 3928 3929 /* 3930 * When the Silicon Image 3112 retries a PCI memory read command, 3931 * it may retry it as a memory read multiple command under some 3932 * circumstances. This can totally confuse some PCI controllers, 3933 * so ensure that it will never do this by making sure that the 3934 * Read Threshold (FIFO Read Request Control) field of the FIFO 3935 * Valid Byte Count and Control registers for both channels (BA5 3936 * offset 0x40 and 0x44) are set to be at least as large as the 3937 * cacheline size register. 3938 */ 3939 void 3940 sii_fixup_cacheline(struct pciide_softc *sc, struct pci_attach_args *pa) 3941 { 3942 pcireg_t cls, reg40, reg44; 3943 3944 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 3945 cls = (cls >> PCI_CACHELINE_SHIFT) & PCI_CACHELINE_MASK; 3946 cls *= 4; 3947 if (cls > 224) { 3948 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 3949 cls &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT); 3950 cls |= ((224/4) << PCI_CACHELINE_SHIFT); 3951 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, cls); 3952 cls = 224; 3953 } 3954 if (cls < 32) 3955 cls = 32; 3956 cls = (cls + 31) / 32; 3957 reg40 = ba5_read_4(sc, 0x40); 3958 reg44 = ba5_read_4(sc, 0x44); 3959 if ((reg40 & 0x7) < cls) 3960 ba5_write_4(sc, 0x40, (reg40 & ~0x07) | cls); 3961 if ((reg44 & 0x7) < cls) 3962 ba5_write_4(sc, 0x44, (reg44 & ~0x07) | cls); 3963 } 3964 3965 void 3966 sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3967 { 3968 struct pciide_channel *cp; 3969 bus_size_t cmdsize, ctlsize; 3970 pcireg_t interface, scs_cmd, cfgctl; 3971 int channel; 3972 struct pciide_satalink *sl; 3973 3974 /* Allocate memory for private data */ 3975 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 3976 sl = sc->sc_cookie; 3977 3978 sc->chip_unmap = default_chip_unmap; 3979 3980 #define SII3112_RESET_BITS \ 3981 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 3982 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 3983 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET) 3984 3985 /* 3986 * Reset everything and then unblock all of the interrupts. 
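 * Assert every reset bit in SCS_CMD, wait, then write the register back with only the BA5 enable state preserved; this clears the reset bits and any interrupt-blocking bits in one go.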
3987 */ 3988 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 3989 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 3990 scs_cmd | SII3112_RESET_BITS); 3991 delay(50 * 1000); 3992 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 3993 scs_cmd & SCS_CMD_BA5_EN); 3994 delay(50 * 1000); 3995 3996 if (scs_cmd & SCS_CMD_BA5_EN) { 3997 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 3998 PCI_MAPREG_TYPE_MEM | 3999 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4000 &sl->ba5_st, &sl->ba5_sh, 4001 NULL, NULL, 0) != 0) 4002 printf(": unable to map BA5 register space\n"); 4003 else 4004 sl->ba5_en = 1; 4005 } else { 4006 cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 4007 SII3112_PCI_CFGCTL); 4008 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL, 4009 cfgctl | CFGCTL_BA5INDEN); 4010 } 4011 4012 printf(": DMA"); 4013 pciide_mapreg_dma(sc, pa); 4014 printf("\n"); 4015 4016 /* 4017 * Rev. <= 0x01 of the 3112 have a bug that can cause data 4018 * corruption if DMA transfers cross an 8K boundary. This is 4019 * apparently hard to tickle, but we'll go ahead and play it 4020 * safe. 4021 */ 4022 if (sc->sc_rev <= 0x01) { 4023 sc->sc_dma_maxsegsz = 8192; 4024 sc->sc_dma_boundary = 8192; 4025 } 4026 4027 sii_fixup_cacheline(sc, pa); 4028 4029 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4030 sc->sc_wdcdev.PIO_cap = 4; 4031 if (sc->sc_dma_ok) { 4032 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4033 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4034 sc->sc_wdcdev.irqack = pciide_irqack; 4035 sc->sc_wdcdev.DMA_cap = 2; 4036 sc->sc_wdcdev.UDMA_cap = 6; 4037 } 4038 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4039 4040 /* We can use SControl and SStatus to probe for drives. */ 4041 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4042 4043 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4044 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4045 4046 /* 4047 * The 3112 either identifies itself as a RAID storage device 4048 * or a Misc storage device. Fake up the interface bits for 4049 * what our driver expects. 
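 * If the subclass says IDE we can trust the programming interface byte; otherwise pretend both channels are native-PCI with bus-master DMA.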
4050 */ 4051 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4052 interface = PCI_INTERFACE(pa->pa_class); 4053 } else { 4054 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4055 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4056 } 4057 4058 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4059 cp = &sc->pciide_channels[channel]; 4060 if (pciide_chansetup(sc, channel, interface) == 0) 4061 continue; 4062 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4063 pciide_pci_intr); 4064 if (cp->hw_ok == 0) 4065 continue; 4066 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4067 } 4068 } 4069 4070 void 4071 sii3112_setup_channel(struct channel_softc *chp) 4072 { 4073 struct ata_drive_datas *drvp; 4074 int drive; 4075 u_int32_t idedma_ctl, dtm; 4076 struct pciide_channel *cp = (struct pciide_channel *)chp; 4077 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4078 4079 /* setup DMA if needed */ 4080 pciide_channel_dma_setup(cp); 4081 4082 idedma_ctl = 0; 4083 dtm = 0; 4084 4085 for (drive = 0; drive < 2; drive++) { 4086 drvp = &chp->ch_drive[drive]; 4087 /* If no drive, skip */ 4088 if ((drvp->drive_flags & DRIVE) == 0) 4089 continue; 4090 if (drvp->drive_flags & DRIVE_UDMA) { 4091 /* use Ultra/DMA */ 4092 drvp->drive_flags &= ~DRIVE_DMA; 4093 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4094 dtm |= DTM_IDEx_DMA; 4095 } else if (drvp->drive_flags & DRIVE_DMA) { 4096 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4097 dtm |= DTM_IDEx_DMA; 4098 } else { 4099 dtm |= DTM_IDEx_PIO; 4100 } 4101 } 4102 4103 /* 4104 * Nothing to do to setup modes; it is meaningless in S-ATA 4105 * (but many S-ATA drives still want to get the SET_FEATURE 4106 * command). 4107 */ 4108 if (idedma_ctl != 0) { 4109 /* Add software bits in status register */ 4110 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 4111 } 4112 BA5_WRITE_4(sc, chp->channel, ba5_IDE_DTM, dtm); 4113 pciide_print_modes(cp); 4114 } 4115 4116 void 4117 sii3112_drv_probe(struct channel_softc *chp) 4118 { 4119 struct pciide_channel *cp = (struct pciide_channel *)chp; 4120 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4121 uint32_t scontrol, sstatus; 4122 uint8_t scnt, sn, cl, ch; 4123 int i, s; 4124 4125 /* XXX This should be done by other code. */ 4126 for (i = 0; i < 2; i++) { 4127 chp->ch_drive[i].chnl_softc = chp; 4128 chp->ch_drive[i].drive = i; 4129 } 4130 4131 /* 4132 * The 3112 is a 2-port part, and only has one drive per channel 4133 * (each port emulates a master drive). 4134 * 4135 * The 3114 is similar, but has 4 channels. 4136 */ 4137 4138 /* 4139 * Request communication initialization sequence, any speed. 4140 * Performing this is the equivalent of an ATA Reset. 4141 */ 4142 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 4143 4144 /* 4145 * XXX We don't yet support SATA power management; disable all 4146 * power management state transitions. 4147 */ 4148 scontrol |= SControl_IPM_NONE; 4149 4150 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4151 delay(50 * 1000); 4152 scontrol &= ~SControl_DET_INIT; 4153 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4154 delay(50 * 1000); 4155 4156 sstatus = BA5_READ_4(sc, chp->channel, ba5_SStatus); 4157 #if 0 4158 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 4159 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 4160 BA5_READ_4(sc, chp->channel, ba5_SControl)); 4161 #endif 4162 switch (sstatus & SStatus_DET_mask) { 4163 case SStatus_DET_NODEV: 4164 /* No device; be silent. 
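Leaving drive_flags untouched means nothing will be attached on this port.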
*/ 4165 break; 4166 4167 case SStatus_DET_DEV_NE: 4168 printf("%s: port %d: device connected, but " 4169 "communication not established\n", 4170 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4171 break; 4172 4173 case SStatus_DET_OFFLINE: 4174 printf("%s: port %d: PHY offline\n", 4175 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4176 break; 4177 4178 case SStatus_DET_DEV: 4179 /* 4180 * XXX ATAPI detection doesn't currently work. Don't 4181 * XXX know why. But, it's not like the standard method 4182 * XXX can detect an ATAPI device connected via a SATA/PATA 4183 * XXX bridge, so at least this is no worse. --thorpej 4184 */ 4185 if (chp->_vtbl != NULL) 4186 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 4187 else 4188 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 4189 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 4190 delay(10); /* 400ns delay */ 4191 /* Save register contents. */ 4192 if (chp->_vtbl != NULL) { 4193 scnt = CHP_READ_REG(chp, wdr_seccnt); 4194 sn = CHP_READ_REG(chp, wdr_sector); 4195 cl = CHP_READ_REG(chp, wdr_cyl_lo); 4196 ch = CHP_READ_REG(chp, wdr_cyl_hi); 4197 } else { 4198 scnt = bus_space_read_1(chp->cmd_iot, 4199 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 4200 sn = bus_space_read_1(chp->cmd_iot, 4201 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 4202 cl = bus_space_read_1(chp->cmd_iot, 4203 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 4204 ch = bus_space_read_1(chp->cmd_iot, 4205 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 4206 } 4207 #if 0 4208 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 4209 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 4210 scnt, sn, cl, ch); 4211 #endif 4212 /* 4213 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 4214 * cases we get wrong values here, so ignore it. 4215 */ 4216 s = splbio(); 4217 if (cl == 0x14 && ch == 0xeb) 4218 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 4219 else 4220 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 4221 splx(s); 4222 4223 printf("%s: port %d: device present", 4224 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4225 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 4226 case 1: 4227 printf(", speed: 1.5Gb/s"); 4228 break; 4229 case 2: 4230 printf(", speed: 3.0Gb/s"); 4231 break; 4232 } 4233 printf("\n"); 4234 break; 4235 4236 default: 4237 printf("%s: port %d: unknown SStatus: 0x%08x\n", 4238 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 4239 } 4240 } 4241 4242 void 4243 sii3114_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4244 { 4245 struct pciide_channel *cp; 4246 pcireg_t scs_cmd; 4247 pci_intr_handle_t intrhandle; 4248 const char *intrstr; 4249 int channel; 4250 struct pciide_satalink *sl; 4251 4252 /* Allocate memory for private data */ 4253 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 4254 sl = sc->sc_cookie; 4255 4256 #define SII3114_RESET_BITS \ 4257 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4258 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4259 SCS_CMD_FF3_RESET | SCS_CMD_FF2_RESET | \ 4260 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET | \ 4261 SCS_CMD_IDE3_RESET | SCS_CMD_IDE2_RESET) 4262 4263 /* 4264 * Reset everything and then unblock all of the interrupts. 4265 */ 4266 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4267 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4268 scs_cmd | SII3114_RESET_BITS); 4269 delay(50 * 1000); 4270 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4271 scs_cmd & SCS_CMD_M66EN); 4272 delay(50 * 1000); 4273 4274 /* 4275 * On the 3114, the BA5 register space is always enabled. 
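(Unlike the 3112, where BA5 is optional and gated by SCS_CMD_BA5_EN.)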
In 4276 * order to use the 3114 in any sane way, we must use this BA5 4277 * register space, and so we consider it an error if we cannot 4278 * map it. 4279 * 4280 * As a consequence of using BA5, our register mapping is different 4281 * from a normal PCI IDE controller's, and so we are unable to use 4282 * most of the common PCI IDE register mapping functions. 4283 */ 4284 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4285 PCI_MAPREG_TYPE_MEM | 4286 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4287 &sl->ba5_st, &sl->ba5_sh, 4288 NULL, NULL, 0) != 0) { 4289 printf(": unable to map BA5 register space\n"); 4290 return; 4291 } 4292 sl->ba5_en = 1; 4293 4294 /* 4295 * Set the Interrupt Steering bit in the IDEDMA_CMD register of 4296 * channel 2. This is required at all times for proper operation 4297 * when using the BA5 register space (otherwise interrupts from 4298 * all 4 channels won't work). 4299 */ 4300 BA5_WRITE_4(sc, 2, ba5_IDEDMA_CMD, IDEDMA_CMD_INT_STEER); 4301 4302 printf(": DMA"); 4303 sii3114_mapreg_dma(sc, pa); 4304 printf("\n"); 4305 4306 sii_fixup_cacheline(sc, pa); 4307 4308 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4309 sc->sc_wdcdev.PIO_cap = 4; 4310 if (sc->sc_dma_ok) { 4311 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4312 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4313 sc->sc_wdcdev.irqack = pciide_irqack; 4314 sc->sc_wdcdev.DMA_cap = 2; 4315 sc->sc_wdcdev.UDMA_cap = 6; 4316 } 4317 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4318 4319 /* We can use SControl and SStatus to probe for drives. */ 4320 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4321 4322 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4323 sc->sc_wdcdev.nchannels = 4; 4324 4325 /* Map and establish the interrupt handler. */ 4326 if (pci_intr_map(pa, &intrhandle) != 0) { 4327 printf("%s: couldn't map native-PCI interrupt\n", 4328 sc->sc_wdcdev.sc_dev.dv_xname); 4329 return; 4330 } 4331 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 4332 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 4333 /* XXX */ 4334 pciide_pci_intr, sc, 4335 sc->sc_wdcdev.sc_dev.dv_xname); 4336 if (sc->sc_pci_ih != NULL) { 4337 printf("%s: using %s for native-PCI interrupt\n", 4338 sc->sc_wdcdev.sc_dev.dv_xname, 4339 intrstr ? intrstr : "unknown interrupt"); 4340 } else { 4341 printf("%s: couldn't establish native-PCI interrupt", 4342 sc->sc_wdcdev.sc_dev.dv_xname); 4343 if (intrstr != NULL) 4344 printf(" at %s", intrstr); 4345 printf("\n"); 4346 return; 4347 } 4348 4349 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4350 cp = &sc->pciide_channels[channel]; 4351 if (sii3114_chansetup(sc, channel) == 0) 4352 continue; 4353 sii3114_mapchan(cp); 4354 if (cp->hw_ok == 0) 4355 continue; 4356 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4357 } 4358 } 4359 4360 void 4361 sii3114_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 4362 { 4363 int chan, reg; 4364 bus_size_t size; 4365 struct pciide_satalink *sl = sc->sc_cookie; 4366 4367 sc->sc_wdcdev.dma_arg = sc; 4368 sc->sc_wdcdev.dma_init = pciide_dma_init; 4369 sc->sc_wdcdev.dma_start = pciide_dma_start; 4370 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 4371 4372 /* 4373 * Slice off a subregion of BA5 for each of the channel's DMA 4374 * registers. 
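 * Each of the four ports gets its own set of register handles, which the sii3114_dmacmd/dmactl/dmatbl accessors below use in place of the normal bus-master mapping.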
4375 */ 4376 4377 sc->sc_dma_iot = sl->ba5_st; 4378 for (chan = 0; chan < 4; chan++) { 4379 for (reg = 0; reg < IDEDMA_NREGS; reg++) { 4380 size = 4; 4381 if (size > (IDEDMA_SCH_OFFSET - reg)) 4382 size = IDEDMA_SCH_OFFSET - reg; 4383 if (bus_space_subregion(sl->ba5_st, 4384 sl->ba5_sh, 4385 satalink_ba5_regmap[chan].ba5_IDEDMA_CMD + reg, 4386 size, &sl->regs[chan].dma_iohs[reg]) != 0) { 4387 sc->sc_dma_ok = 0; 4388 printf(": can't subregion offset " 4389 "%lu size %lu", 4390 (u_long) satalink_ba5_regmap[ 4391 chan].ba5_IDEDMA_CMD + reg, 4392 (u_long) size); 4393 return; 4394 } 4395 } 4396 } 4397 4398 sc->sc_dmacmd_read = sii3114_dmacmd_read; 4399 sc->sc_dmacmd_write = sii3114_dmacmd_write; 4400 sc->sc_dmactl_read = sii3114_dmactl_read; 4401 sc->sc_dmactl_write = sii3114_dmactl_write; 4402 sc->sc_dmatbl_write = sii3114_dmatbl_write; 4403 4404 /* DMA registers all set up! */ 4405 sc->sc_dmat = pa->pa_dmat; 4406 sc->sc_dma_ok = 1; 4407 } 4408 4409 int 4410 sii3114_chansetup(struct pciide_softc *sc, int channel) 4411 { 4412 static const char *channel_names[] = { 4413 "port 0", 4414 "port 1", 4415 "port 2", 4416 "port 3", 4417 }; 4418 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4419 4420 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4421 4422 /* 4423 * We must always keep the Interrupt Steering bit set in channel 2's 4424 * IDEDMA_CMD register. 4425 */ 4426 if (channel == 2) 4427 cp->idedma_cmd = IDEDMA_CMD_INT_STEER; 4428 4429 cp->name = channel_names[channel]; 4430 cp->wdc_channel.channel = channel; 4431 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4432 cp->wdc_channel.ch_queue = 4433 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 4434 if (cp->wdc_channel.ch_queue == NULL) { 4435 printf("%s %s channel: " 4436 "can't allocate memory for command queue", 4437 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4438 return (0); 4439 } 4440 return (1); 4441 } 4442 4443 void 4444 sii3114_mapchan(struct pciide_channel *cp) 4445 { 4446 struct channel_softc *wdc_cp = &cp->wdc_channel; 4447 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4448 struct pciide_satalink *sl = sc->sc_cookie; 4449 int chan = wdc_cp->channel; 4450 int i; 4451 4452 cp->hw_ok = 0; 4453 cp->compat = 0; 4454 cp->ih = sc->sc_pci_ih; 4455 4456 sl->regs[chan].cmd_iot = sl->ba5_st; 4457 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4458 satalink_ba5_regmap[chan].ba5_IDE_TF0, 4459 9, &sl->regs[chan].cmd_baseioh) != 0) { 4460 printf("%s: couldn't subregion %s cmd base\n", 4461 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4462 return; 4463 } 4464 4465 sl->regs[chan].ctl_iot = sl->ba5_st; 4466 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4467 satalink_ba5_regmap[chan].ba5_IDE_TF8, 4468 1, &cp->ctl_baseioh) != 0) { 4469 printf("%s: couldn't subregion %s ctl base\n", 4470 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4471 return; 4472 } 4473 sl->regs[chan].ctl_ioh = cp->ctl_baseioh; 4474 4475 for (i = 0; i < WDC_NREG; i++) { 4476 if (bus_space_subregion(sl->regs[chan].cmd_iot, 4477 sl->regs[chan].cmd_baseioh, 4478 i, i == 0 ? 
4 : 1, 4479 &sl->regs[chan].cmd_iohs[i]) != 0) { 4480 printf("%s: couldn't subregion %s channel " 4481 "cmd regs\n", 4482 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4483 return; 4484 } 4485 } 4486 sl->regs[chan].cmd_iohs[wdr_status & _WDC_REGMASK] = 4487 sl->regs[chan].cmd_iohs[wdr_command & _WDC_REGMASK]; 4488 sl->regs[chan].cmd_iohs[wdr_features & _WDC_REGMASK] = 4489 sl->regs[chan].cmd_iohs[wdr_error & _WDC_REGMASK]; 4490 wdc_cp->data32iot = wdc_cp->cmd_iot = sl->regs[chan].cmd_iot; 4491 wdc_cp->data32ioh = wdc_cp->cmd_ioh = sl->regs[chan].cmd_iohs[0]; 4492 wdc_cp->_vtbl = &wdc_sii3114_vtbl; 4493 wdcattach(wdc_cp); 4494 cp->hw_ok = 1; 4495 } 4496 4497 u_int8_t 4498 sii3114_read_reg(struct channel_softc *chp, enum wdc_regs reg) 4499 { 4500 struct pciide_channel *cp = (struct pciide_channel *)chp; 4501 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4502 struct pciide_satalink *sl = sc->sc_cookie; 4503 4504 if (reg & _WDC_AUX) 4505 return (bus_space_read_1(sl->regs[chp->channel].ctl_iot, 4506 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 4507 else 4508 return (bus_space_read_1(sl->regs[chp->channel].cmd_iot, 4509 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0)); 4510 } 4511 4512 void 4513 sii3114_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 4514 { 4515 struct pciide_channel *cp = (struct pciide_channel *)chp; 4516 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4517 struct pciide_satalink *sl = sc->sc_cookie; 4518 4519 if (reg & _WDC_AUX) 4520 bus_space_write_1(sl->regs[chp->channel].ctl_iot, 4521 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 4522 else 4523 bus_space_write_1(sl->regs[chp->channel].cmd_iot, 4524 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 4525 0, val); 4526 } 4527 4528 u_int8_t 4529 sii3114_dmacmd_read(struct pciide_softc *sc, int chan) 4530 { 4531 struct pciide_satalink *sl = sc->sc_cookie; 4532 4533 return (bus_space_read_1(sc->sc_dma_iot, 4534 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0)); 4535 } 4536 4537 void 4538 sii3114_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 4539 { 4540 struct pciide_satalink *sl = sc->sc_cookie; 4541 4542 bus_space_write_1(sc->sc_dma_iot, 4543 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, val); 4544 } 4545 4546 u_int8_t 4547 sii3114_dmactl_read(struct pciide_softc *sc, int chan) 4548 { 4549 struct pciide_satalink *sl = sc->sc_cookie; 4550 4551 return (bus_space_read_1(sc->sc_dma_iot, 4552 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0)); 4553 } 4554 4555 void 4556 sii3114_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 4557 { 4558 struct pciide_satalink *sl = sc->sc_cookie; 4559 4560 bus_space_write_1(sc->sc_dma_iot, 4561 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0, val); 4562 } 4563 4564 void 4565 sii3114_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 4566 { 4567 struct pciide_satalink *sl = sc->sc_cookie; 4568 4569 bus_space_write_4(sc->sc_dma_iot, 4570 sl->regs[chan].dma_iohs[IDEDMA_TBL(0)], 0, val); 4571 } 4572 4573 void 4574 cy693_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4575 { 4576 struct pciide_channel *cp; 4577 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4578 bus_size_t cmdsize, ctlsize; 4579 struct pciide_cy *cy; 4580 4581 /* Allocate memory for private data */ 4582 sc->sc_cookie = malloc(sizeof(*cy), M_DEVBUF, M_NOWAIT | M_ZERO); 4583 cy = sc->sc_cookie; 4584 4585 /* 4586 * this chip has 2 PCI IDE functions, one for primary and one for 4587 * secondary. 
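 * PCI function 1 carries the primary channel and function 2 the secondary.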
So we need to call pciide_mapregs_compat() with 4588 * the real channel 4589 */ 4590 if (pa->pa_function == 1) { 4591 cy->cy_compatchan = 0; 4592 } else if (pa->pa_function == 2) { 4593 cy->cy_compatchan = 1; 4594 } else { 4595 printf(": unexpected PCI function %d\n", pa->pa_function); 4596 return; 4597 } 4598 4599 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 4600 printf(": DMA"); 4601 pciide_mapreg_dma(sc, pa); 4602 } else { 4603 printf(": no DMA"); 4604 sc->sc_dma_ok = 0; 4605 } 4606 4607 cy->cy_handle = cy82c693_init(pa->pa_iot); 4608 if (cy->cy_handle == NULL) { 4609 printf(", (unable to map ctl registers)"); 4610 sc->sc_dma_ok = 0; 4611 } 4612 4613 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4614 WDC_CAPABILITY_MODE; 4615 if (sc->sc_dma_ok) { 4616 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4617 sc->sc_wdcdev.irqack = pciide_irqack; 4618 } 4619 sc->sc_wdcdev.PIO_cap = 4; 4620 sc->sc_wdcdev.DMA_cap = 2; 4621 sc->sc_wdcdev.set_modes = cy693_setup_channel; 4622 4623 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4624 sc->sc_wdcdev.nchannels = 1; 4625 4626 /* Only one channel for this chip; if we are here it's enabled */ 4627 cp = &sc->pciide_channels[0]; 4628 sc->wdc_chanarray[0] = &cp->wdc_channel; 4629 cp->name = PCIIDE_CHANNEL_NAME(0); 4630 cp->wdc_channel.channel = 0; 4631 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4632 cp->wdc_channel.ch_queue = 4633 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 4634 if (cp->wdc_channel.ch_queue == NULL) { 4635 printf(": cannot allocate memory for command queue\n"); 4636 return; 4637 } 4638 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 4639 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 4640 "configured" : "wired"); 4641 if (interface & PCIIDE_INTERFACE_PCI(0)) { 4642 printf("native-PCI\n"); 4643 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 4644 pciide_pci_intr); 4645 } else { 4646 printf("compatibility\n"); 4647 cp->hw_ok = pciide_mapregs_compat(pa, cp, cy->cy_compatchan, 4648 &cmdsize, &ctlsize); 4649 } 4650 4651 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4652 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4653 pciide_map_compat_intr(pa, cp, cy->cy_compatchan, interface); 4654 if (cp->hw_ok == 0) 4655 return; 4656 wdcattach(&cp->wdc_channel); 4657 if (pciide_chan_candisable(cp)) { 4658 pci_conf_write(sc->sc_pc, sc->sc_tag, 4659 PCI_COMMAND_STATUS_REG, 0); 4660 } 4661 if (cp->hw_ok == 0) { 4662 pciide_unmap_compat_intr(pa, cp, cy->cy_compatchan, 4663 interface); 4664 return; 4665 } 4666 4667 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 4668 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4669 cy693_setup_channel(&cp->wdc_channel); 4670 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 4671 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4672 } 4673 4674 void 4675 cy693_setup_channel(struct channel_softc *chp) 4676 { 4677 struct ata_drive_datas *drvp; 4678 int drive; 4679 u_int32_t cy_cmd_ctrl; 4680 u_int32_t idedma_ctl; 4681 struct pciide_channel *cp = (struct pciide_channel *)chp; 4682 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4683 int dma_mode = -1; 4684 struct pciide_cy *cy = sc->sc_cookie; 4685 4686 cy_cmd_ctrl = idedma_ctl = 0; 4687 4688 /* setup DMA if needed */ 4689 pciide_channel_dma_setup(cp); 4690 4691 for (drive = 0; drive < 2; drive++) { 4692 drvp = &chp->ch_drive[drive]; 4693 /* If no drive, skip */ 4694 if ((drvp->drive_flags & DRIVE) == 0) 4695 continue; 4696 /* add 
timing values, setup DMA if needed */ 4697 if (drvp->drive_flags & DRIVE_DMA) { 4698 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4699 /* use Multiword DMA */ 4700 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 4701 dma_mode = drvp->DMA_mode; 4702 } 4703 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4704 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 4705 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4706 CY_CMD_CTRL_IOW_REC_OFF(drive)); 4707 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4708 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 4709 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4710 CY_CMD_CTRL_IOR_REC_OFF(drive)); 4711 } 4712 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 4713 chp->ch_drive[0].DMA_mode = dma_mode; 4714 chp->ch_drive[1].DMA_mode = dma_mode; 4715 4716 if (dma_mode == -1) 4717 dma_mode = 0; 4718 4719 if (cy->cy_handle != NULL) { 4720 /* Note: `multiple' is implied. */ 4721 cy82c693_write(cy->cy_handle, 4722 (cy->cy_compatchan == 0) ? 4723 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 4724 } 4725 4726 pciide_print_modes(cp); 4727 4728 if (idedma_ctl != 0) { 4729 /* Add software bits in status register */ 4730 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4731 IDEDMA_CTL(chp->channel), idedma_ctl); 4732 } 4733 } 4734 4735 static struct sis_hostbr_type { 4736 u_int16_t id; 4737 u_int8_t rev; 4738 u_int8_t udma_mode; 4739 char *name; 4740 u_int8_t type; 4741 #define SIS_TYPE_NOUDMA 0 4742 #define SIS_TYPE_66 1 4743 #define SIS_TYPE_100OLD 2 4744 #define SIS_TYPE_100NEW 3 4745 #define SIS_TYPE_133OLD 4 4746 #define SIS_TYPE_133NEW 5 4747 #define SIS_TYPE_SOUTH 6 4748 } sis_hostbr_type[] = { 4749 /* Most infos here are from sos@freebsd.org */ 4750 {PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66}, 4751 #if 0 4752 /* 4753 * controllers associated to a rev 0x2 530 Host to PCI Bridge 4754 * have problems with UDMA (info provided by Christos) 4755 */ 4756 {PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA}, 4757 #endif 4758 {PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66}, 4759 {PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66}, 4760 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66}, 4761 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66}, 4762 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW}, 4763 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW}, 4764 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW}, 4765 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH}, 4766 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH}, 4767 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH}, 4768 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH}, 4769 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH}, 4770 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH}, 4771 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH}, 4772 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH}, 4773 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH}, 4774 {PCI_PRODUCT_SIS_661, 0x00, 6, "661", SIS_TYPE_SOUTH}, 4775 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD}, 4776 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW}, 4777 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW}, 4778 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH}, 4779 {PCI_PRODUCT_SIS_741, 0x00, 6, "741", SIS_TYPE_SOUTH}, 4780 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW}, 4781 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH}, 4782 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH}, 4783 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH}, 4784 
{PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH}, 4785 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH}, 4786 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH}, 4787 {PCI_PRODUCT_SIS_760, 0x00, 6, "760", SIS_TYPE_SOUTH}, 4788 /* 4789 * From sos@freebsd.org: the 0x961 ID will never be found in real world 4790 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW}, 4791 */ 4792 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW}, 4793 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW}, 4794 {PCI_PRODUCT_SIS_964, 0x00, 6, "964", SIS_TYPE_133NEW}, 4795 {PCI_PRODUCT_SIS_965, 0x00, 6, "965", SIS_TYPE_133NEW} 4796 }; 4797 4798 static struct sis_hostbr_type *sis_hostbr_type_match; 4799 4800 int 4801 sis_hostbr_match(struct pci_attach_args *pa) 4802 { 4803 int i; 4804 4805 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS) 4806 return (0); 4807 sis_hostbr_type_match = NULL; 4808 for (i = 0; 4809 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]); 4810 i++) { 4811 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id && 4812 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev) 4813 sis_hostbr_type_match = &sis_hostbr_type[i]; 4814 } 4815 return (sis_hostbr_type_match != NULL); 4816 } 4817 4818 int 4819 sis_south_match(struct pci_attach_args *pa) 4820 { 4821 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS && 4822 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 && 4823 PCI_REVISION(pa->pa_class) >= 0x10); 4824 } 4825 4826 void 4827 sis_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4828 { 4829 struct pciide_channel *cp; 4830 int channel; 4831 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 4832 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4833 int rev = sc->sc_rev; 4834 bus_size_t cmdsize, ctlsize; 4835 struct pciide_sis *sis; 4836 4837 /* Allocate memory for private data */ 4838 sc->sc_cookie = malloc(sizeof(*sis), M_DEVBUF, M_NOWAIT | M_ZERO); 4839 sis = sc->sc_cookie; 4840 4841 pci_find_device(NULL, sis_hostbr_match); 4842 4843 if (sis_hostbr_type_match) { 4844 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) { 4845 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57, 4846 pciide_pci_read(sc->sc_pc, sc->sc_tag, 4847 SIS_REG_57) & 0x7f); 4848 if (sc->sc_pp->ide_product == SIS_PRODUCT_5518) { 4849 sis->sis_type = SIS_TYPE_133NEW; 4850 sc->sc_wdcdev.UDMA_cap = 4851 sis_hostbr_type_match->udma_mode; 4852 } else { 4853 if (pci_find_device(NULL, sis_south_match)) { 4854 sis->sis_type = SIS_TYPE_133OLD; 4855 sc->sc_wdcdev.UDMA_cap = 4856 sis_hostbr_type_match->udma_mode; 4857 } else { 4858 sis->sis_type = SIS_TYPE_100NEW; 4859 sc->sc_wdcdev.UDMA_cap = 4860 sis_hostbr_type_match->udma_mode; 4861 } 4862 } 4863 } else { 4864 sis->sis_type = sis_hostbr_type_match->type; 4865 sc->sc_wdcdev.UDMA_cap = 4866 sis_hostbr_type_match->udma_mode; 4867 } 4868 printf(": %s", sis_hostbr_type_match->name); 4869 } else { 4870 printf(": 5597/5598"); 4871 if (rev >= 0xd0) { 4872 sc->sc_wdcdev.UDMA_cap = 2; 4873 sis->sis_type = SIS_TYPE_66; 4874 } else { 4875 sc->sc_wdcdev.UDMA_cap = 0; 4876 sis->sis_type = SIS_TYPE_NOUDMA; 4877 } 4878 } 4879 4880 printf(": DMA"); 4881 pciide_mapreg_dma(sc, pa); 4882 4883 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4884 WDC_CAPABILITY_MODE; 4885 if (sc->sc_dma_ok) { 4886 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4887 sc->sc_wdcdev.irqack = pciide_irqack; 4888 if (sis->sis_type >= SIS_TYPE_66) 4889 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 4890 } 4891 4892 sc->sc_wdcdev.PIO_cap = 4; 
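/* Multiword DMA is capped at mode 2 here; the UDMA ceiling was already picked above from the host bridge type. */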
4893 sc->sc_wdcdev.DMA_cap = 2; 4894 4895 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4896 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4897 switch (sis->sis_type) { 4898 case SIS_TYPE_NOUDMA: 4899 case SIS_TYPE_66: 4900 case SIS_TYPE_100OLD: 4901 sc->sc_wdcdev.set_modes = sis_setup_channel; 4902 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 4903 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 4904 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC); 4905 break; 4906 case SIS_TYPE_100NEW: 4907 case SIS_TYPE_133OLD: 4908 sc->sc_wdcdev.set_modes = sis_setup_channel; 4909 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49, 4910 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01); 4911 break; 4912 case SIS_TYPE_133NEW: 4913 sc->sc_wdcdev.set_modes = sis96x_setup_channel; 4914 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 4915 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 4916 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 4917 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 4918 break; 4919 } 4920 4921 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 4922 4923 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4924 cp = &sc->pciide_channels[channel]; 4925 if (pciide_chansetup(sc, channel, interface) == 0) 4926 continue; 4927 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 4928 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 4929 printf("%s: %s ignored (disabled)\n", 4930 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4931 continue; 4932 } 4933 pciide_map_compat_intr(pa, cp, channel, interface); 4934 if (cp->hw_ok == 0) 4935 continue; 4936 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4937 pciide_pci_intr); 4938 if (cp->hw_ok == 0) { 4939 pciide_unmap_compat_intr(pa, cp, channel, interface); 4940 continue; 4941 } 4942 if (pciide_chan_candisable(cp)) { 4943 if (channel == 0) 4944 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 4945 else 4946 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 4947 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 4948 sis_ctr0); 4949 } 4950 if (cp->hw_ok == 0) { 4951 pciide_unmap_compat_intr(pa, cp, channel, interface); 4952 continue; 4953 } 4954 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4955 } 4956 } 4957 4958 void 4959 sis96x_setup_channel(struct channel_softc *chp) 4960 { 4961 struct ata_drive_datas *drvp; 4962 int drive; 4963 u_int32_t sis_tim; 4964 u_int32_t idedma_ctl; 4965 int regtim; 4966 struct pciide_channel *cp = (struct pciide_channel *)chp; 4967 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4968 4969 sis_tim = 0; 4970 idedma_ctl = 0; 4971 /* setup DMA if needed */ 4972 pciide_channel_dma_setup(cp); 4973 4974 for (drive = 0; drive < 2; drive++) { 4975 regtim = SIS_TIM133( 4976 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 4977 chp->channel, drive); 4978 drvp = &chp->ch_drive[drive]; 4979 /* If no drive, skip */ 4980 if ((drvp->drive_flags & DRIVE) == 0) 4981 continue; 4982 /* add timing values, setup DMA if needed */ 4983 if (drvp->drive_flags & DRIVE_UDMA) { 4984 /* use Ultra/DMA */ 4985 drvp->drive_flags &= ~DRIVE_DMA; 4986 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 4987 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 4988 if (drvp->UDMA_mode > 2) 4989 drvp->UDMA_mode = 2; 4990 } 4991 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 4992 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 4993 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4994 } else if (drvp->drive_flags & DRIVE_DMA) { 4995 /* 4996 * use Multiword DMA 4997 * Timings will be used for 
both PIO and DMA, 4998 * so adjust DMA mode if needed 4999 */ 5000 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5001 drvp->PIO_mode = drvp->DMA_mode + 2; 5002 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5003 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 5004 drvp->PIO_mode - 2 : 0; 5005 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 5006 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5007 } else { 5008 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5009 } 5010 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 5011 "channel %d drive %d: 0x%x (reg 0x%x)\n", 5012 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 5013 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 5014 } 5015 if (idedma_ctl != 0) { 5016 /* Add software bits in status register */ 5017 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5018 IDEDMA_CTL(chp->channel), idedma_ctl); 5019 } 5020 pciide_print_modes(cp); 5021 } 5022 5023 void 5024 sis_setup_channel(struct channel_softc *chp) 5025 { 5026 struct ata_drive_datas *drvp; 5027 int drive; 5028 u_int32_t sis_tim; 5029 u_int32_t idedma_ctl; 5030 struct pciide_channel *cp = (struct pciide_channel *)chp; 5031 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5032 struct pciide_sis *sis = sc->sc_cookie; 5033 5034 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 5035 "channel %d 0x%x\n", chp->channel, 5036 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 5037 DEBUG_PROBE); 5038 sis_tim = 0; 5039 idedma_ctl = 0; 5040 /* setup DMA if needed */ 5041 pciide_channel_dma_setup(cp); 5042 5043 for (drive = 0; drive < 2; drive++) { 5044 drvp = &chp->ch_drive[drive]; 5045 /* If no drive, skip */ 5046 if ((drvp->drive_flags & DRIVE) == 0) 5047 continue; 5048 /* add timing values, setup DMA if needed */ 5049 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5050 (drvp->drive_flags & DRIVE_UDMA) == 0) 5051 goto pio; 5052 5053 if (drvp->drive_flags & DRIVE_UDMA) { 5054 /* use Ultra/DMA */ 5055 drvp->drive_flags &= ~DRIVE_DMA; 5056 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5057 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 5058 if (drvp->UDMA_mode > 2) 5059 drvp->UDMA_mode = 2; 5060 } 5061 switch (sis->sis_type) { 5062 case SIS_TYPE_66: 5063 case SIS_TYPE_100OLD: 5064 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 5065 SIS_TIM66_UDMA_TIME_OFF(drive); 5066 break; 5067 case SIS_TYPE_100NEW: 5068 sis_tim |= 5069 sis_udma100new_tim[drvp->UDMA_mode] << 5070 SIS_TIM100_UDMA_TIME_OFF(drive); 5071 break; 5072 case SIS_TYPE_133OLD: 5073 sis_tim |= 5074 sis_udma133old_tim[drvp->UDMA_mode] << 5075 SIS_TIM100_UDMA_TIME_OFF(drive); 5076 break; 5077 default: 5078 printf("unknown SiS IDE type %d\n", 5079 sis->sis_type); 5080 } 5081 } else { 5082 /* 5083 * use Multiword DMA 5084 * Timings will be used for both PIO and DMA, 5085 * so adjust DMA mode if needed 5086 */ 5087 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5088 drvp->PIO_mode = drvp->DMA_mode + 2; 5089 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5090 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5091 drvp->PIO_mode - 2 : 0; 5092 if (drvp->DMA_mode == 0) 5093 drvp->PIO_mode = 0; 5094 } 5095 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5096 pio: switch (sis->sis_type) { 5097 case SIS_TYPE_NOUDMA: 5098 case SIS_TYPE_66: 5099 case SIS_TYPE_100OLD: 5100 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5101 SIS_TIM66_ACT_OFF(drive); 5102 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5103 SIS_TIM66_REC_OFF(drive); 5104 break; 5105 case SIS_TYPE_100NEW: 5106 case SIS_TYPE_133OLD: 5107 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5108 SIS_TIM100_ACT_OFF(drive); 5109 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5110 SIS_TIM100_REC_OFF(drive); 5111 break; 5112 default: 5113 printf("unknown SiS IDE type %d\n", 5114 sis->sis_type); 5115 } 5116 } 5117 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 5118 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 5119 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 5120 if (idedma_ctl != 0) { 5121 /* Add software bits in status register */ 5122 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5123 IDEDMA_CTL(chp->channel), idedma_ctl); 5124 } 5125 pciide_print_modes(cp); 5126 } 5127 5128 void 5129 natsemi_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5130 { 5131 struct pciide_channel *cp; 5132 int channel; 5133 pcireg_t interface, ctl; 5134 bus_size_t cmdsize, ctlsize; 5135 5136 printf(": DMA"); 5137 pciide_mapreg_dma(sc, pa); 5138 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 5139 5140 if (sc->sc_dma_ok) { 5141 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5142 sc->sc_wdcdev.irqack = natsemi_irqack; 5143 } 5144 5145 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 5146 5147 /* 5148 * Mask off interrupts from both channels, appropriate channel(s) 5149 * will be unmasked later. 5150 */ 5151 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5152 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 5153 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 5154 5155 sc->sc_wdcdev.PIO_cap = 4; 5156 sc->sc_wdcdev.DMA_cap = 2; 5157 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 5158 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5159 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5160 5161 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5162 PCI_CLASS_REG)); 5163 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 5164 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5165 5166 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
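With both channels in compatibility mode the legacy interrupts are used instead, so INTA has to stay masked.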
*/ 5167 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 5168 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 5169 ctl &= ~NATSEMI_CTRL1_INTAMASK; 5170 else 5171 ctl |= NATSEMI_CTRL1_INTAMASK; 5172 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 5173 5174 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5175 cp = &sc->pciide_channels[channel]; 5176 if (pciide_chansetup(sc, channel, interface) == 0) 5177 continue; 5178 5179 pciide_map_compat_intr(pa, cp, channel, interface); 5180 if (cp->hw_ok == 0) 5181 continue; 5182 5183 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5184 natsemi_pci_intr); 5185 if (cp->hw_ok == 0) { 5186 pciide_unmap_compat_intr(pa, cp, channel, interface); 5187 continue; 5188 } 5189 natsemi_setup_channel(&cp->wdc_channel); 5190 } 5191 } 5192 5193 void 5194 natsemi_setup_channel(struct channel_softc *chp) 5195 { 5196 struct ata_drive_datas *drvp; 5197 int drive, ndrives = 0; 5198 u_int32_t idedma_ctl = 0; 5199 struct pciide_channel *cp = (struct pciide_channel *)chp; 5200 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5201 u_int8_t tim; 5202 5203 /* setup DMA if needed */ 5204 pciide_channel_dma_setup(cp); 5205 5206 for (drive = 0; drive < 2; drive++) { 5207 drvp = &chp->ch_drive[drive]; 5208 /* If no drive, skip */ 5209 if ((drvp->drive_flags & DRIVE) == 0) 5210 continue; 5211 5212 ndrives++; 5213 /* add timing values, setup DMA if needed */ 5214 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 5215 tim = natsemi_pio_pulse[drvp->PIO_mode] | 5216 (natsemi_pio_recover[drvp->PIO_mode] << 4); 5217 } else { 5218 /* 5219 * use Multiword DMA 5220 * Timings will be used for both PIO and DMA, 5221 * so adjust DMA mode if needed 5222 */ 5223 if (drvp->PIO_mode >= 3 && 5224 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5225 drvp->DMA_mode = drvp->PIO_mode - 2; 5226 } 5227 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5228 tim = natsemi_dma_pulse[drvp->DMA_mode] | 5229 (natsemi_dma_recover[drvp->DMA_mode] << 4); 5230 } 5231 5232 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5233 NATSEMI_RTREG(chp->channel, drive), tim); 5234 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5235 NATSEMI_WTREG(chp->channel, drive), tim); 5236 } 5237 if (idedma_ctl != 0) { 5238 /* Add software bits in status register */ 5239 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5240 IDEDMA_CTL(chp->channel), idedma_ctl); 5241 } 5242 if (ndrives > 0) { 5243 /* Unmask the channel if at least one drive is found */ 5244 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5245 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 5246 ~(NATSEMI_CHMASK(chp->channel))); 5247 } 5248 5249 pciide_print_modes(cp); 5250 5251 /* Go ahead and ack interrupts generated during probe. 
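Writing the status bits back to IDEDMA_CTL clears any pending interrupt/error indications.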
*/ 5252 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5253 IDEDMA_CTL(chp->channel), 5254 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5255 IDEDMA_CTL(chp->channel))); 5256 } 5257 5258 void 5259 natsemi_irqack(struct channel_softc *chp) 5260 { 5261 struct pciide_channel *cp = (struct pciide_channel *)chp; 5262 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5263 u_int8_t clr; 5264 5265 /* The "clear" bits are in the wrong register *sigh* */ 5266 clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5267 IDEDMA_CMD(chp->channel)); 5268 clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5269 IDEDMA_CTL(chp->channel)) & 5270 (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR); 5271 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5272 IDEDMA_CMD(chp->channel), clr); 5273 } 5274 5275 int 5276 natsemi_pci_intr(void *arg) 5277 { 5278 struct pciide_softc *sc = arg; 5279 struct pciide_channel *cp; 5280 struct channel_softc *wdc_cp; 5281 int i, rv, crv; 5282 u_int8_t msk; 5283 5284 rv = 0; 5285 msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2); 5286 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5287 cp = &sc->pciide_channels[i]; 5288 wdc_cp = &cp->wdc_channel; 5289 5290 /* If a compat channel skip. */ 5291 if (cp->compat) 5292 continue; 5293 5294 /* If this channel is masked, skip it. */ 5295 if (msk & NATSEMI_CHMASK(i)) 5296 continue; 5297 5298 if (pciide_intr_flag(cp) == 0) 5299 continue; 5300 5301 crv = wdcintr(wdc_cp); 5302 if (crv == 0) 5303 ; /* leave rv alone */ 5304 else if (crv == 1) 5305 rv = 1; /* claim the intr */ 5306 else if (rv == 0) /* crv should be -1 in this case */ 5307 rv = crv; /* if we've done no better, take it */ 5308 } 5309 return (rv); 5310 } 5311 5312 void 5313 ns_scx200_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5314 { 5315 struct pciide_channel *cp; 5316 int channel; 5317 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 5318 bus_size_t cmdsize, ctlsize; 5319 5320 printf(": DMA"); 5321 pciide_mapreg_dma(sc, pa); 5322 5323 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5324 WDC_CAPABILITY_MODE; 5325 if (sc->sc_dma_ok) { 5326 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5327 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5328 sc->sc_wdcdev.irqack = pciide_irqack; 5329 } 5330 sc->sc_wdcdev.PIO_cap = 4; 5331 sc->sc_wdcdev.DMA_cap = 2; 5332 sc->sc_wdcdev.UDMA_cap = 2; 5333 5334 sc->sc_wdcdev.set_modes = ns_scx200_setup_channel; 5335 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5336 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5337 5338 /* 5339 * Soekris net4801 errata 0003: 5340 * 5341 * The SC1100 built in busmaster IDE controller is pretty standard, 5342 * but have two bugs: data transfers need to be dword aligned and 5343 * it cannot do an exact 64Kbyte data transfer. 5344 * 5345 * Assume that reducing maximum segment size by one page 5346 * will be enough, and restrict boundary too for extra certainty. 5347 */ 5348 if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SCx200_IDE) { 5349 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE; 5350 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE; 5351 } 5352 5353 /* 5354 * This chip seems to be unable to do one-sector transfers 5355 * using DMA. 
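 * The WDC_QUIRK_NOSHORTDMA quirk set just below makes the wdc layer avoid DMA for those short transfers.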
5356 */ 5357 sc->sc_wdcdev.quirks = WDC_QUIRK_NOSHORTDMA; 5358 5359 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5360 5361 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5362 cp = &sc->pciide_channels[channel]; 5363 if (pciide_chansetup(sc, channel, interface) == 0) 5364 continue; 5365 pciide_map_compat_intr(pa, cp, channel, interface); 5366 if (cp->hw_ok == 0) 5367 continue; 5368 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5369 pciide_pci_intr); 5370 if (cp->hw_ok == 0) { 5371 pciide_unmap_compat_intr(pa, cp, channel, interface); 5372 continue; 5373 } 5374 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5375 } 5376 } 5377 5378 void 5379 ns_scx200_setup_channel(struct channel_softc *chp) 5380 { 5381 struct ata_drive_datas *drvp; 5382 int drive, mode; 5383 u_int32_t idedma_ctl; 5384 struct pciide_channel *cp = (struct pciide_channel*)chp; 5385 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5386 int channel = chp->channel; 5387 int pioformat; 5388 pcireg_t piotim, dmatim; 5389 5390 /* Setup DMA if needed */ 5391 pciide_channel_dma_setup(cp); 5392 5393 idedma_ctl = 0; 5394 5395 pioformat = (pci_conf_read(sc->sc_pc, sc->sc_tag, 5396 SCx200_TIM_DMA(0, 0)) >> SCx200_PIOFORMAT_SHIFT) & 0x01; 5397 WDCDEBUG_PRINT(("%s: pio format %d\n", __func__, pioformat), 5398 DEBUG_PROBE); 5399 5400 /* Per channel settings */ 5401 for (drive = 0; drive < 2; drive++) { 5402 drvp = &chp->ch_drive[drive]; 5403 5404 /* If no drive, skip */ 5405 if ((drvp->drive_flags & DRIVE) == 0) 5406 continue; 5407 5408 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5409 SCx200_TIM_PIO(channel, drive)); 5410 dmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5411 SCx200_TIM_DMA(channel, drive)); 5412 WDCDEBUG_PRINT(("%s:%d:%d: piotim=0x%x, dmatim=0x%x\n", 5413 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5414 piotim, dmatim), DEBUG_PROBE); 5415 5416 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 5417 (drvp->drive_flags & DRIVE_UDMA) != 0) { 5418 /* Setup UltraDMA mode */ 5419 drvp->drive_flags &= ~DRIVE_DMA; 5420 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5421 dmatim = scx200_udma33[drvp->UDMA_mode]; 5422 mode = drvp->PIO_mode; 5423 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 5424 (drvp->drive_flags & DRIVE_DMA) != 0) { 5425 /* Setup multiword DMA mode */ 5426 drvp->drive_flags &= ~DRIVE_UDMA; 5427 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5428 dmatim = scx200_dma33[drvp->DMA_mode]; 5429 5430 /* mode = min(pio, dma + 2) */ 5431 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 5432 mode = drvp->PIO_mode; 5433 else 5434 mode = drvp->DMA_mode + 2; 5435 } else { 5436 mode = drvp->PIO_mode; 5437 } 5438 5439 /* Setup PIO mode */ 5440 drvp->PIO_mode = mode; 5441 if (mode < 2) 5442 drvp->DMA_mode = 0; 5443 else 5444 drvp->DMA_mode = mode - 2; 5445 5446 piotim = scx200_pio33[pioformat][drvp->PIO_mode]; 5447 5448 WDCDEBUG_PRINT(("%s:%d:%d: new piotim=0x%x, dmatim=0x%x\n", 5449 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5450 piotim, dmatim), DEBUG_PROBE); 5451 5452 pci_conf_write(sc->sc_pc, sc->sc_tag, 5453 SCx200_TIM_PIO(channel, drive), piotim); 5454 pci_conf_write(sc->sc_pc, sc->sc_tag, 5455 SCx200_TIM_DMA(channel, drive), dmatim); 5456 } 5457 5458 if (idedma_ctl != 0) { 5459 /* Add software bits in status register */ 5460 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5461 IDEDMA_CTL(channel), idedma_ctl); 5462 } 5463 5464 pciide_print_modes(cp); 5465 } 5466 5467 void 5468 acer_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5469 { 5470 struct 
pciide_channel *cp; 5471 int channel; 5472 pcireg_t cr, interface; 5473 bus_size_t cmdsize, ctlsize; 5474 int rev = sc->sc_rev; 5475 5476 printf(": DMA"); 5477 pciide_mapreg_dma(sc, pa); 5478 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5479 WDC_CAPABILITY_MODE; 5480 5481 if (sc->sc_dma_ok) { 5482 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 5483 if (rev >= 0x20) { 5484 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5485 if (rev >= 0xC4) 5486 sc->sc_wdcdev.UDMA_cap = 5; 5487 else if (rev >= 0xC2) 5488 sc->sc_wdcdev.UDMA_cap = 4; 5489 else 5490 sc->sc_wdcdev.UDMA_cap = 2; 5491 } 5492 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5493 sc->sc_wdcdev.irqack = pciide_irqack; 5494 } 5495 5496 sc->sc_wdcdev.PIO_cap = 4; 5497 sc->sc_wdcdev.DMA_cap = 2; 5498 sc->sc_wdcdev.set_modes = acer_setup_channel; 5499 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5500 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5501 5502 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 5503 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 5504 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 5505 5506 /* Enable "microsoft register bits" R/W. */ 5507 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 5508 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 5509 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 5510 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 5511 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 5512 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 5513 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 5514 ~ACER_CHANSTATUSREGS_RO); 5515 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 5516 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 5517 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 5518 /* Don't use cr, re-read the real register content instead */ 5519 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5520 PCI_CLASS_REG)); 5521 5522 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5523 5524 /* From linux: enable "Cable Detection" */ 5525 if (rev >= 0xC2) 5526 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 5527 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 5528 | ACER_0x4B_CDETECT); 5529 5530 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5531 cp = &sc->pciide_channels[channel]; 5532 if (pciide_chansetup(sc, channel, interface) == 0) 5533 continue; 5534 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 5535 printf("%s: %s ignored (disabled)\n", 5536 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5537 continue; 5538 } 5539 pciide_map_compat_intr(pa, cp, channel, interface); 5540 if (cp->hw_ok == 0) 5541 continue; 5542 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5543 (rev >= 0xC2) ? 
pciide_pci_intr : acer_pci_intr); 5544 if (cp->hw_ok == 0) { 5545 pciide_unmap_compat_intr(pa, cp, channel, interface); 5546 continue; 5547 } 5548 if (pciide_chan_candisable(cp)) { 5549 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 5550 pci_conf_write(sc->sc_pc, sc->sc_tag, 5551 PCI_CLASS_REG, cr); 5552 } 5553 if (cp->hw_ok == 0) { 5554 pciide_unmap_compat_intr(pa, cp, channel, interface); 5555 continue; 5556 } 5557 acer_setup_channel(&cp->wdc_channel); 5558 } 5559 } 5560 5561 void 5562 acer_setup_channel(struct channel_softc *chp) 5563 { 5564 struct ata_drive_datas *drvp; 5565 int drive; 5566 u_int32_t acer_fifo_udma; 5567 u_int32_t idedma_ctl; 5568 struct pciide_channel *cp = (struct pciide_channel *)chp; 5569 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5570 5571 idedma_ctl = 0; 5572 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 5573 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 5574 acer_fifo_udma), DEBUG_PROBE); 5575 /* setup DMA if needed */ 5576 pciide_channel_dma_setup(cp); 5577 5578 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 5579 DRIVE_UDMA) { /* check 80 pins cable */ 5580 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 5581 ACER_0x4A_80PIN(chp->channel)) { 5582 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 5583 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 5584 DEBUG_PROBE); 5585 if (chp->ch_drive[0].UDMA_mode > 2) 5586 chp->ch_drive[0].UDMA_mode = 2; 5587 if (chp->ch_drive[1].UDMA_mode > 2) 5588 chp->ch_drive[1].UDMA_mode = 2; 5589 } 5590 } 5591 5592 for (drive = 0; drive < 2; drive++) { 5593 drvp = &chp->ch_drive[drive]; 5594 /* If no drive, skip */ 5595 if ((drvp->drive_flags & DRIVE) == 0) 5596 continue; 5597 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 5598 "channel %d drive %d 0x%x\n", chp->channel, drive, 5599 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5600 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 5601 /* clear FIFO/DMA mode */ 5602 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 5603 ACER_UDMA_EN(chp->channel, drive) | 5604 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 5605 5606 /* add timing values, setup DMA if needed */ 5607 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5608 (drvp->drive_flags & DRIVE_UDMA) == 0) { 5609 acer_fifo_udma |= 5610 ACER_FTH_OPL(chp->channel, drive, 0x1); 5611 goto pio; 5612 } 5613 5614 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 5615 if (drvp->drive_flags & DRIVE_UDMA) { 5616 /* use Ultra/DMA */ 5617 drvp->drive_flags &= ~DRIVE_DMA; 5618 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 5619 acer_fifo_udma |= 5620 ACER_UDMA_TIM(chp->channel, drive, 5621 acer_udma[drvp->UDMA_mode]); 5622 /* XXX disable if one drive < UDMA3 ? */ 5623 if (drvp->UDMA_mode >= 3) { 5624 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5625 ACER_0x4B, 5626 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5627 ACER_0x4B) | ACER_0x4B_UDMA66); 5628 } 5629 } else { 5630 /* 5631 * use Multiword DMA 5632 * Timings will be used for both PIO and DMA, 5633 * so adjust DMA mode if needed 5634 */ 5635 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5636 drvp->PIO_mode = drvp->DMA_mode + 2; 5637 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5638 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5639 drvp->PIO_mode - 2 : 0; 5640 if (drvp->DMA_mode == 0) 5641 drvp->PIO_mode = 0; 5642 } 5643 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5644 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 5645 ACER_IDETIM(chp->channel, drive), 5646 acer_pio[drvp->PIO_mode]); 5647 } 5648 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 5649 acer_fifo_udma), DEBUG_PROBE); 5650 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 5651 if (idedma_ctl != 0) { 5652 /* Add software bits in status register */ 5653 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5654 IDEDMA_CTL(chp->channel), idedma_ctl); 5655 } 5656 pciide_print_modes(cp); 5657 } 5658 5659 int 5660 acer_pci_intr(void *arg) 5661 { 5662 struct pciide_softc *sc = arg; 5663 struct pciide_channel *cp; 5664 struct channel_softc *wdc_cp; 5665 int i, rv, crv; 5666 u_int32_t chids; 5667 5668 rv = 0; 5669 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 5670 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5671 cp = &sc->pciide_channels[i]; 5672 wdc_cp = &cp->wdc_channel; 5673 /* If a compat channel skip. */ 5674 if (cp->compat) 5675 continue; 5676 if (chids & ACER_CHIDS_INT(i)) { 5677 crv = wdcintr(wdc_cp); 5678 if (crv == 0) 5679 printf("%s:%d: bogus intr\n", 5680 sc->sc_wdcdev.sc_dev.dv_xname, i); 5681 else 5682 rv = 1; 5683 } 5684 } 5685 return (rv); 5686 } 5687 5688 void 5689 hpt_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5690 { 5691 struct pciide_channel *cp; 5692 int i, compatchan, revision; 5693 pcireg_t interface; 5694 bus_size_t cmdsize, ctlsize; 5695 5696 revision = sc->sc_rev; 5697 5698 /* 5699 * when the chip is in native mode it identifies itself as a 5700 * 'misc mass storage'. Fake interface in this case. 5701 */ 5702 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 5703 interface = PCI_INTERFACE(pa->pa_class); 5704 } else { 5705 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 5706 PCIIDE_INTERFACE_PCI(0); 5707 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5708 (revision == HPT370_REV || revision == HPT370A_REV || 5709 revision == HPT372_REV)) || 5710 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5711 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5712 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5713 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5714 interface |= PCIIDE_INTERFACE_PCI(1); 5715 } 5716 5717 printf(": DMA"); 5718 pciide_mapreg_dma(sc, pa); 5719 printf("\n"); 5720 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5721 WDC_CAPABILITY_MODE; 5722 if (sc->sc_dma_ok) { 5723 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5724 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5725 sc->sc_wdcdev.irqack = pciide_irqack; 5726 } 5727 sc->sc_wdcdev.PIO_cap = 4; 5728 sc->sc_wdcdev.DMA_cap = 2; 5729 5730 sc->sc_wdcdev.set_modes = hpt_setup_channel; 5731 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5732 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5733 revision == HPT366_REV) { 5734 sc->sc_wdcdev.UDMA_cap = 4; 5735 /* 5736 * The 366 has 2 PCI IDE functions, one for primary and one 5737 * for secondary. 
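 * (each function is attached as its own single-channel controller)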
So we need to call pciide_mapregs_compat() 5738 * with the real channel 5739 */ 5740 if (pa->pa_function == 0) { 5741 compatchan = 0; 5742 } else if (pa->pa_function == 1) { 5743 compatchan = 1; 5744 } else { 5745 printf("%s: unexpected PCI function %d\n", 5746 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 5747 return; 5748 } 5749 sc->sc_wdcdev.nchannels = 1; 5750 } else { 5751 sc->sc_wdcdev.nchannels = 2; 5752 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5753 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5754 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5755 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5756 sc->sc_wdcdev.UDMA_cap = 6; 5757 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 5758 if (revision == HPT372_REV) 5759 sc->sc_wdcdev.UDMA_cap = 6; 5760 else 5761 sc->sc_wdcdev.UDMA_cap = 5; 5762 } 5763 } 5764 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5765 cp = &sc->pciide_channels[i]; 5766 if (sc->sc_wdcdev.nchannels > 1) { 5767 compatchan = i; 5768 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 5769 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 5770 printf("%s: %s ignored (disabled)\n", 5771 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5772 continue; 5773 } 5774 } 5775 if (pciide_chansetup(sc, i, interface) == 0) 5776 continue; 5777 if (interface & PCIIDE_INTERFACE_PCI(i)) { 5778 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 5779 &ctlsize, hpt_pci_intr); 5780 } else { 5781 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 5782 &cmdsize, &ctlsize); 5783 } 5784 if (cp->hw_ok == 0) 5785 return; 5786 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 5787 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 5788 wdcattach(&cp->wdc_channel); 5789 hpt_setup_channel(&cp->wdc_channel); 5790 } 5791 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5792 (revision == HPT370_REV || revision == HPT370A_REV || 5793 revision == HPT372_REV)) || 5794 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5795 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5796 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5797 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 5798 /* 5799 * Turn off fast interrupts 5800 */ 5801 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0), 5802 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) & 5803 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 5804 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1), 5805 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) & 5806 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 5807 5808 /* 5809 * HPT370 and higher have a bit to disable interrupts, 5810 * make sure to clear it 5811 */ 5812 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 5813 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 5814 ~HPT_CSEL_IRQDIS); 5815 } 5816 /* set clocks, etc (mandatory on 372/4, optional otherwise) */ 5817 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5818 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5819 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5820 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 || 5821 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5822 revision == HPT372_REV)) 5823 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 5824 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 5825 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 5826 5827 return; 5828 } 5829 5830 void 5831 hpt_setup_channel(struct channel_softc *chp) 5832 { 5833 struct ata_drive_datas *drvp; 5834 int drive;
5835 int cable; 5836 u_int32_t before, after; 5837 u_int32_t idedma_ctl; 5838 struct pciide_channel *cp = (struct pciide_channel *)chp; 5839 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5840 int revision = sc->sc_rev; 5841 u_int32_t *tim_pio, *tim_dma, *tim_udma; 5842 5843 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 5844 5845 /* setup DMA if needed */ 5846 pciide_channel_dma_setup(cp); 5847 5848 idedma_ctl = 0; 5849 5850 switch (sc->sc_pp->ide_product) { 5851 case PCI_PRODUCT_TRIONES_HPT366: 5852 if (revision == HPT370_REV || 5853 revision == HPT370A_REV) { 5854 tim_pio = hpt370_pio; 5855 tim_dma = hpt370_dma; 5856 tim_udma = hpt370_udma; 5857 } else if (revision == HPT372_REV) { 5858 tim_pio = hpt372_pio; 5859 tim_dma = hpt372_dma; 5860 tim_udma = hpt372_udma; 5861 } else { 5862 tim_pio = hpt366_pio; 5863 tim_dma = hpt366_dma; 5864 tim_udma = hpt366_udma; 5865 } 5866 break; 5867 case PCI_PRODUCT_TRIONES_HPT372A: 5868 case PCI_PRODUCT_TRIONES_HPT302: 5869 case PCI_PRODUCT_TRIONES_HPT371: 5870 tim_pio = hpt372_pio; 5871 tim_dma = hpt372_dma; 5872 tim_udma = hpt372_udma; 5873 break; 5874 case PCI_PRODUCT_TRIONES_HPT374: 5875 tim_pio = hpt374_pio; 5876 tim_dma = hpt374_dma; 5877 tim_udma = hpt374_udma; 5878 break; 5879 default: 5880 printf("%s: no known timing values\n", 5881 sc->sc_wdcdev.sc_dev.dv_xname); 5882 goto end; 5883 } 5884 5885 /* Per drive settings */ 5886 for (drive = 0; drive < 2; drive++) { 5887 drvp = &chp->ch_drive[drive]; 5888 /* If no drive, skip */ 5889 if ((drvp->drive_flags & DRIVE) == 0) 5890 continue; 5891 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 5892 HPT_IDETIM(chp->channel, drive)); 5893 5894 /* add timing values, setup DMA if needed */ 5895 if (drvp->drive_flags & DRIVE_UDMA) { 5896 /* use Ultra/DMA */ 5897 drvp->drive_flags &= ~DRIVE_DMA; 5898 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 5899 drvp->UDMA_mode > 2) { 5900 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 5901 "cable not detected\n", drvp->drive_name, 5902 sc->sc_wdcdev.sc_dev.dv_xname, 5903 chp->channel, drive), DEBUG_PROBE); 5904 drvp->UDMA_mode = 2; 5905 } 5906 after = tim_udma[drvp->UDMA_mode]; 5907 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5908 } else if (drvp->drive_flags & DRIVE_DMA) { 5909 /* 5910 * use Multiword DMA. 
5911 * Timings will be used for both PIO and DMA, so adjust 5912 * DMA mode if needed 5913 */ 5914 if (drvp->PIO_mode >= 3 && 5915 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5916 drvp->DMA_mode = drvp->PIO_mode - 2; 5917 } 5918 after = tim_dma[drvp->DMA_mode]; 5919 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5920 } else { 5921 /* PIO only */ 5922 after = tim_pio[drvp->PIO_mode]; 5923 } 5924 pci_conf_write(sc->sc_pc, sc->sc_tag, 5925 HPT_IDETIM(chp->channel, drive), after); 5926 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 5927 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 5928 after, before), DEBUG_PROBE); 5929 } 5930 end: 5931 if (idedma_ctl != 0) { 5932 /* Add software bits in status register */ 5933 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5934 IDEDMA_CTL(chp->channel), idedma_ctl); 5935 } 5936 pciide_print_modes(cp); 5937 } 5938 5939 int 5940 hpt_pci_intr(void *arg) 5941 { 5942 struct pciide_softc *sc = arg; 5943 struct pciide_channel *cp; 5944 struct channel_softc *wdc_cp; 5945 int rv = 0; 5946 int dmastat, i, crv; 5947 5948 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5949 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5950 IDEDMA_CTL(i)); 5951 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 5952 IDEDMA_CTL_INTR) 5953 continue; 5954 cp = &sc->pciide_channels[i]; 5955 wdc_cp = &cp->wdc_channel; 5956 crv = wdcintr(wdc_cp); 5957 if (crv == 0) { 5958 printf("%s:%d: bogus intr\n", 5959 sc->sc_wdcdev.sc_dev.dv_xname, i); 5960 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5961 IDEDMA_CTL(i), dmastat); 5962 } else 5963 rv = 1; 5964 } 5965 return (rv); 5966 } 5967 5968 /* Macros to test product */ 5969 #define PDC_IS_262(sc) \ 5970 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 5971 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 5972 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 5973 #define PDC_IS_265(sc) \ 5974 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 5975 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 5976 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 5977 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 5978 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 5979 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 5980 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 5981 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 5982 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 5983 #define PDC_IS_268(sc) \ 5984 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 5985 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 5986 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 5987 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 5988 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 5989 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 5990 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 5991 #define PDC_IS_269(sc) \ 5992 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 5993 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 5994 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 5995 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 5996 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 5997 5998 u_int8_t 5999 pdc268_config_read(struct channel_softc *chp, int index) 6000 { 6001 struct pciide_channel *cp = (struct pciide_channel *)chp; 6002 struct 
pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6003 int channel = chp->channel; 6004 6005 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6006 PDC268_INDEX(channel), index); 6007 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6008 PDC268_DATA(channel))); 6009 } 6010 6011 void 6012 pdc202xx_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6013 { 6014 struct pciide_channel *cp; 6015 int channel; 6016 pcireg_t interface, st, mode; 6017 bus_size_t cmdsize, ctlsize; 6018 6019 if (!PDC_IS_268(sc)) { 6020 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6021 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 6022 st), DEBUG_PROBE); 6023 } 6024 6025 /* turn off RAID mode */ 6026 if (!PDC_IS_268(sc)) 6027 st &= ~PDC2xx_STATE_IDERAID; 6028 6029 /* 6030 * can't rely on the PCI_CLASS_REG content if the chip was in raid 6031 * mode. We have to fake interface 6032 */ 6033 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 6034 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 6035 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 6036 6037 printf(": DMA"); 6038 pciide_mapreg_dma(sc, pa); 6039 6040 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6041 WDC_CAPABILITY_MODE; 6042 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 6043 PDC_IS_262(sc)) 6044 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 6045 if (sc->sc_dma_ok) { 6046 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6047 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6048 sc->sc_wdcdev.irqack = pciide_irqack; 6049 } 6050 sc->sc_wdcdev.PIO_cap = 4; 6051 sc->sc_wdcdev.DMA_cap = 2; 6052 if (PDC_IS_269(sc)) 6053 sc->sc_wdcdev.UDMA_cap = 6; 6054 else if (PDC_IS_265(sc)) 6055 sc->sc_wdcdev.UDMA_cap = 5; 6056 else if (PDC_IS_262(sc)) 6057 sc->sc_wdcdev.UDMA_cap = 4; 6058 else 6059 sc->sc_wdcdev.UDMA_cap = 2; 6060 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
6061 pdc20268_setup_channel : pdc202xx_setup_channel; 6062 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6063 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 6064 6065 if (PDC_IS_262(sc)) { 6066 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 6067 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 6068 } 6069 6070 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 6071 if (!PDC_IS_268(sc)) { 6072 /* setup failsafe defaults */ 6073 mode = 0; 6074 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 6075 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 6076 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 6077 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 6078 for (channel = 0; 6079 channel < sc->sc_wdcdev.nchannels; 6080 channel++) { 6081 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6082 "drive 0 initial timings 0x%x, now 0x%x\n", 6083 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6084 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 6085 DEBUG_PROBE); 6086 pci_conf_write(sc->sc_pc, sc->sc_tag, 6087 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 6088 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6089 "drive 1 initial timings 0x%x, now 0x%x\n", 6090 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6091 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 6092 pci_conf_write(sc->sc_pc, sc->sc_tag, 6093 PDC2xx_TIM(channel, 1), mode); 6094 } 6095 6096 mode = PDC2xx_SCR_DMA; 6097 if (PDC_IS_262(sc)) { 6098 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 6099 } else { 6100 /* the BIOS set it up this way */ 6101 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 6102 } 6103 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 6104 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 6105 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 6106 "now 0x%x\n", 6107 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6108 PDC2xx_SCR), 6109 mode), DEBUG_PROBE); 6110 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6111 PDC2xx_SCR, mode); 6112 6113 /* controller initial state register is OK even without BIOS */ 6114 /* Set DMA mode to IDE DMA compatibility */ 6115 mode = 6116 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 6117 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 6118 DEBUG_PROBE); 6119 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 6120 mode | 0x1); 6121 mode = 6122 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 6123 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 6124 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 6125 mode | 0x1); 6126 } 6127 6128 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6129 cp = &sc->pciide_channels[channel]; 6130 if (pciide_chansetup(sc, channel, interface) == 0) 6131 continue; 6132 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 6133 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 6134 printf("%s: %s ignored (disabled)\n", 6135 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6136 continue; 6137 } 6138 pciide_map_compat_intr(pa, cp, channel, interface); 6139 if (cp->hw_ok == 0) 6140 continue; 6141 if (PDC_IS_265(sc)) 6142 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6143 pdc20265_pci_intr); 6144 else 6145 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6146 pdc202xx_pci_intr); 6147 if (cp->hw_ok == 0) { 6148 pciide_unmap_compat_intr(pa, cp, channel, interface); 6149 continue; 6150 } 6151 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 6152 st &= ~(PDC_IS_262(sc) ? 
6153 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 6154 pciide_unmap_compat_intr(pa, cp, channel, interface); 6155 } 6156 if (PDC_IS_268(sc)) 6157 pdc20268_setup_channel(&cp->wdc_channel); 6158 else 6159 pdc202xx_setup_channel(&cp->wdc_channel); 6160 } 6161 if (!PDC_IS_268(sc)) { 6162 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 6163 "0x%x\n", st), DEBUG_PROBE); 6164 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 6165 } 6166 return; 6167 } 6168 6169 void 6170 pdc202xx_setup_channel(struct channel_softc *chp) 6171 { 6172 struct ata_drive_datas *drvp; 6173 int drive; 6174 pcireg_t mode, st; 6175 u_int32_t idedma_ctl, scr, atapi; 6176 struct pciide_channel *cp = (struct pciide_channel *)chp; 6177 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6178 int channel = chp->channel; 6179 6180 /* setup DMA if needed */ 6181 pciide_channel_dma_setup(cp); 6182 6183 idedma_ctl = 0; 6184 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 6185 sc->sc_wdcdev.sc_dev.dv_xname, 6186 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 6187 DEBUG_PROBE); 6188 6189 /* Per channel settings */ 6190 if (PDC_IS_262(sc)) { 6191 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6192 PDC262_U66); 6193 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6194 /* Check cable */ 6195 if ((st & PDC262_STATE_80P(channel)) != 0 && 6196 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6197 chp->ch_drive[0].UDMA_mode > 2) || 6198 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6199 chp->ch_drive[1].UDMA_mode > 2))) { 6200 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 6201 sc->sc_wdcdev.sc_dev.dv_xname, channel), 6202 DEBUG_PROBE); 6203 if (chp->ch_drive[0].UDMA_mode > 2) 6204 chp->ch_drive[0].UDMA_mode = 2; 6205 if (chp->ch_drive[1].UDMA_mode > 2) 6206 chp->ch_drive[1].UDMA_mode = 2; 6207 } 6208 /* Trim UDMA mode */ 6209 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6210 chp->ch_drive[0].UDMA_mode <= 2) || 6211 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6212 chp->ch_drive[1].UDMA_mode <= 2)) { 6213 if (chp->ch_drive[0].UDMA_mode > 2) 6214 chp->ch_drive[0].UDMA_mode = 2; 6215 if (chp->ch_drive[1].UDMA_mode > 2) 6216 chp->ch_drive[1].UDMA_mode = 2; 6217 } 6218 /* Set U66 if needed */ 6219 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6220 chp->ch_drive[0].UDMA_mode > 2) || 6221 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6222 chp->ch_drive[1].UDMA_mode > 2)) 6223 scr |= PDC262_U66_EN(channel); 6224 else 6225 scr &= ~PDC262_U66_EN(channel); 6226 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6227 PDC262_U66, scr); 6228 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 6229 sc->sc_wdcdev.sc_dev.dv_xname, channel, 6230 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6231 PDC262_ATAPI(channel))), DEBUG_PROBE); 6232 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 6233 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 6234 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6235 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6236 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 6237 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6238 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6239 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 6240 atapi = 0; 6241 else 6242 atapi = PDC262_ATAPI_UDMA; 6243 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6244 PDC262_ATAPI(channel), atapi); 6245 } 6246 } 6247 for (drive = 0; drive < 2; drive++) { 6248 drvp = &chp->ch_drive[drive]; 6249 /* If no drive, skip */ 6250 if ((drvp->drive_flags & 
DRIVE) == 0) 6251 continue; 6252 mode = 0; 6253 if (drvp->drive_flags & DRIVE_UDMA) { 6254 /* use Ultra/DMA */ 6255 drvp->drive_flags &= ~DRIVE_DMA; 6256 mode = PDC2xx_TIM_SET_MB(mode, 6257 pdc2xx_udma_mb[drvp->UDMA_mode]); 6258 mode = PDC2xx_TIM_SET_MC(mode, 6259 pdc2xx_udma_mc[drvp->UDMA_mode]); 6260 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6261 } else if (drvp->drive_flags & DRIVE_DMA) { 6262 mode = PDC2xx_TIM_SET_MB(mode, 6263 pdc2xx_dma_mb[drvp->DMA_mode]); 6264 mode = PDC2xx_TIM_SET_MC(mode, 6265 pdc2xx_dma_mc[drvp->DMA_mode]); 6266 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6267 } else { 6268 mode = PDC2xx_TIM_SET_MB(mode, 6269 pdc2xx_dma_mb[0]); 6270 mode = PDC2xx_TIM_SET_MC(mode, 6271 pdc2xx_dma_mc[0]); 6272 } 6273 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 6274 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 6275 if (drvp->drive_flags & DRIVE_ATA) 6276 mode |= PDC2xx_TIM_PRE; 6277 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 6278 if (drvp->PIO_mode >= 3) { 6279 mode |= PDC2xx_TIM_IORDY; 6280 if (drive == 0) 6281 mode |= PDC2xx_TIM_IORDYp; 6282 } 6283 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 6284 "timings 0x%x\n", 6285 sc->sc_wdcdev.sc_dev.dv_xname, 6286 chp->channel, drive, mode), DEBUG_PROBE); 6287 pci_conf_write(sc->sc_pc, sc->sc_tag, 6288 PDC2xx_TIM(chp->channel, drive), mode); 6289 } 6290 if (idedma_ctl != 0) { 6291 /* Add software bits in status register */ 6292 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6293 IDEDMA_CTL(channel), idedma_ctl); 6294 } 6295 pciide_print_modes(cp); 6296 } 6297 6298 void 6299 pdc20268_setup_channel(struct channel_softc *chp) 6300 { 6301 struct ata_drive_datas *drvp; 6302 int drive, cable; 6303 u_int32_t idedma_ctl; 6304 struct pciide_channel *cp = (struct pciide_channel *)chp; 6305 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6306 int channel = chp->channel; 6307 6308 /* check 80 pins cable */ 6309 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE; 6310 6311 /* setup DMA if needed */ 6312 pciide_channel_dma_setup(cp); 6313 6314 idedma_ctl = 0; 6315 6316 for (drive = 0; drive < 2; drive++) { 6317 drvp = &chp->ch_drive[drive]; 6318 /* If no drive, skip */ 6319 if ((drvp->drive_flags & DRIVE) == 0) 6320 continue; 6321 if (drvp->drive_flags & DRIVE_UDMA) { 6322 /* use Ultra/DMA */ 6323 drvp->drive_flags &= ~DRIVE_DMA; 6324 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6325 if (cable && drvp->UDMA_mode > 2) { 6326 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6327 "cable not detected\n", drvp->drive_name, 6328 sc->sc_wdcdev.sc_dev.dv_xname, 6329 channel, drive), DEBUG_PROBE); 6330 drvp->UDMA_mode = 2; 6331 } 6332 } else if (drvp->drive_flags & DRIVE_DMA) { 6333 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6334 } 6335 } 6336 /* nothing to do to set up modes, the controller snoops the SET_FEATURES cmd */ 6337 if (idedma_ctl != 0) { 6338 /* Add software bits in status register */ 6339 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6340 IDEDMA_CTL(channel), idedma_ctl); 6341 } 6342 pciide_print_modes(cp); 6343 } 6344 6345 int 6346 pdc202xx_pci_intr(void *arg) 6347 { 6348 struct pciide_softc *sc = arg; 6349 struct pciide_channel *cp; 6350 struct channel_softc *wdc_cp; 6351 int i, rv, crv; 6352 u_int32_t scr; 6353 6354 rv = 0; 6355 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 6356 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6357 cp = &sc->pciide_channels[i]; 6358 wdc_cp = &cp->wdc_channel; 6359 /* If a compat channel skip.
*/ 6360 if (cp->compat) 6361 continue; 6362 if (scr & PDC2xx_SCR_INT(i)) { 6363 crv = wdcintr(wdc_cp); 6364 if (crv == 0) 6365 printf("%s:%d: bogus intr (reg 0x%x)\n", 6366 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 6367 else 6368 rv = 1; 6369 } 6370 } 6371 return (rv); 6372 } 6373 6374 int 6375 pdc20265_pci_intr(void *arg) 6376 { 6377 struct pciide_softc *sc = arg; 6378 struct pciide_channel *cp; 6379 struct channel_softc *wdc_cp; 6380 int i, rv, crv; 6381 u_int32_t dmastat; 6382 6383 rv = 0; 6384 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6385 cp = &sc->pciide_channels[i]; 6386 wdc_cp = &cp->wdc_channel; 6387 /* If a compat channel skip. */ 6388 if (cp->compat) 6389 continue; 6390 6391 /* 6392 * In case of shared IRQ check that the interrupt 6393 * was actually generated by this channel. 6394 * Only check the channel that is enabled. 6395 */ 6396 if (cp->hw_ok && PDC_IS_268(sc)) { 6397 if ((pdc268_config_read(wdc_cp, 6398 0x0b) & PDC268_INTR) == 0) 6399 continue; 6400 } 6401 6402 /* 6403 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously, 6404 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 6405 * So use it instead (requires 2 reg reads instead of 1, 6406 * but we can't do it another way). 6407 */ 6408 dmastat = bus_space_read_1(sc->sc_dma_iot, 6409 sc->sc_dma_ioh, IDEDMA_CTL(i)); 6410 if ((dmastat & IDEDMA_CTL_INTR) == 0) 6411 continue; 6412 6413 crv = wdcintr(wdc_cp); 6414 if (crv == 0) 6415 printf("%s:%d: bogus intr\n", 6416 sc->sc_wdcdev.sc_dev.dv_xname, i); 6417 else 6418 rv = 1; 6419 } 6420 return (rv); 6421 } 6422 6423 void 6424 pdc20262_dma_start(void *v, int channel, int drive) 6425 { 6426 struct pciide_softc *sc = v; 6427 struct pciide_dma_maps *dma_maps = 6428 &sc->pciide_channels[channel].dma_maps[drive]; 6429 u_int8_t clock; 6430 u_int32_t count; 6431 6432 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6433 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6434 PDC262_U66); 6435 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6436 PDC262_U66, clock | PDC262_U66_EN(channel)); 6437 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 6438 count |= dma_maps->dma_flags & WDC_DMA_READ ?
6439 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 6440 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6441 PDC262_ATAPI(channel), count); 6442 } 6443 6444 pciide_dma_start(v, channel, drive); 6445 } 6446 6447 int 6448 pdc20262_dma_finish(void *v, int channel, int drive, int force) 6449 { 6450 struct pciide_softc *sc = v; 6451 struct pciide_dma_maps *dma_maps = 6452 &sc->pciide_channels[channel].dma_maps[drive]; 6453 u_int8_t clock; 6454 6455 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6456 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6457 PDC262_U66); 6458 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6459 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 6460 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6461 PDC262_ATAPI(channel), 0); 6462 } 6463 6464 return (pciide_dma_finish(v, channel, drive, force)); 6465 } 6466 6467 void 6468 pdcsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6469 { 6470 struct pciide_channel *cp; 6471 struct channel_softc *wdc_cp; 6472 struct pciide_pdcsata *ps; 6473 int channel, i; 6474 bus_size_t dmasize; 6475 pci_intr_handle_t intrhandle; 6476 const char *intrstr; 6477 6478 /* Allocate memory for private data */ 6479 sc->sc_cookie = malloc(sizeof(*ps), M_DEVBUF, M_NOWAIT | M_ZERO); 6480 ps = sc->sc_cookie; 6481 6482 /* 6483 * Promise SATA controllers have 3 or 4 channels, 6484 * the usual IDE registers are mapped in I/O space, with offsets. 6485 */ 6486 if (pci_intr_map(pa, &intrhandle) != 0) { 6487 printf(": couldn't map interrupt\n"); 6488 return; 6489 } 6490 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 6491 6492 switch (sc->sc_pp->ide_product) { 6493 case PCI_PRODUCT_PROMISE_PDC20318: 6494 case PCI_PRODUCT_PROMISE_PDC20319: 6495 case PCI_PRODUCT_PROMISE_PDC20371: 6496 case PCI_PRODUCT_PROMISE_PDC20375: 6497 case PCI_PRODUCT_PROMISE_PDC20376: 6498 case PCI_PRODUCT_PROMISE_PDC20377: 6499 case PCI_PRODUCT_PROMISE_PDC20378: 6500 case PCI_PRODUCT_PROMISE_PDC20379: 6501 default: 6502 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6503 intrhandle, IPL_BIO, pdc203xx_pci_intr, sc, 6504 sc->sc_wdcdev.sc_dev.dv_xname); 6505 break; 6506 6507 case PCI_PRODUCT_PROMISE_PDC40518: 6508 case PCI_PRODUCT_PROMISE_PDC40519: 6509 case PCI_PRODUCT_PROMISE_PDC40718: 6510 case PCI_PRODUCT_PROMISE_PDC40719: 6511 case PCI_PRODUCT_PROMISE_PDC40779: 6512 case PCI_PRODUCT_PROMISE_PDC20571: 6513 case PCI_PRODUCT_PROMISE_PDC20575: 6514 case PCI_PRODUCT_PROMISE_PDC20579: 6515 case PCI_PRODUCT_PROMISE_PDC20771: 6516 case PCI_PRODUCT_PROMISE_PDC20775: 6517 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6518 intrhandle, IPL_BIO, pdc205xx_pci_intr, sc, 6519 sc->sc_wdcdev.sc_dev.dv_xname); 6520 break; 6521 } 6522 6523 if (sc->sc_pci_ih == NULL) { 6524 printf(": couldn't establish native-PCI interrupt"); 6525 if (intrstr != NULL) 6526 printf(" at %s", intrstr); 6527 printf("\n"); 6528 return; 6529 } 6530 6531 sc->sc_dma_ok = (pci_mapreg_map(pa, PCIIDE_REG_BUS_MASTER_DMA, 6532 PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_dma_iot, 6533 &sc->sc_dma_ioh, NULL, &dmasize, 0) == 0); 6534 if (!sc->sc_dma_ok) { 6535 printf(": couldn't map bus-master DMA registers\n"); 6536 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6537 return; 6538 } 6539 6540 sc->sc_dmat = pa->pa_dmat; 6541 6542 if (pci_mapreg_map(pa, PDC203xx_BAR_IDEREGS, 6543 PCI_MAPREG_MEM_TYPE_32BIT, 0, &ps->ba5_st, 6544 &ps->ba5_sh, NULL, NULL, 0) != 0) { 6545 printf(": couldn't map IDE registers\n"); 6546 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, dmasize); 6547 pci_intr_disestablish(pa->pa_pc, 
sc->sc_pci_ih); 6548 return; 6549 } 6550 6551 printf(": DMA\n"); 6552 6553 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 6554 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6555 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6556 sc->sc_wdcdev.irqack = pdc203xx_irqack; 6557 sc->sc_wdcdev.PIO_cap = 4; 6558 sc->sc_wdcdev.DMA_cap = 2; 6559 sc->sc_wdcdev.UDMA_cap = 6; 6560 sc->sc_wdcdev.set_modes = pdc203xx_setup_channel; 6561 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6562 6563 switch (sc->sc_pp->ide_product) { 6564 case PCI_PRODUCT_PROMISE_PDC20318: 6565 case PCI_PRODUCT_PROMISE_PDC20319: 6566 case PCI_PRODUCT_PROMISE_PDC20371: 6567 case PCI_PRODUCT_PROMISE_PDC20375: 6568 case PCI_PRODUCT_PROMISE_PDC20376: 6569 case PCI_PRODUCT_PROMISE_PDC20377: 6570 case PCI_PRODUCT_PROMISE_PDC20378: 6571 case PCI_PRODUCT_PROMISE_PDC20379: 6572 default: 6573 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x06c, 0x00ff0033); 6574 sc->sc_wdcdev.nchannels = 6575 (bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x48) & 0x02) ? 6576 PDC203xx_NCHANNELS : 3; 6577 break; 6578 6579 case PCI_PRODUCT_PROMISE_PDC40518: 6580 case PCI_PRODUCT_PROMISE_PDC40519: 6581 case PCI_PRODUCT_PROMISE_PDC40718: 6582 case PCI_PRODUCT_PROMISE_PDC40719: 6583 case PCI_PRODUCT_PROMISE_PDC40779: 6584 case PCI_PRODUCT_PROMISE_PDC20571: 6585 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6586 sc->sc_wdcdev.nchannels = PDC40718_NCHANNELS; 6587 6588 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6589 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6590 6591 break; 6592 case PCI_PRODUCT_PROMISE_PDC20575: 6593 case PCI_PRODUCT_PROMISE_PDC20579: 6594 case PCI_PRODUCT_PROMISE_PDC20771: 6595 case PCI_PRODUCT_PROMISE_PDC20775: 6596 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6597 sc->sc_wdcdev.nchannels = PDC20575_NCHANNELS; 6598 6599 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6600 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6601 6602 break; 6603 } 6604 6605 sc->sc_wdcdev.dma_arg = sc; 6606 sc->sc_wdcdev.dma_init = pciide_dma_init; 6607 sc->sc_wdcdev.dma_start = pdc203xx_dma_start; 6608 sc->sc_wdcdev.dma_finish = pdc203xx_dma_finish; 6609 6610 for (channel = 0; channel < sc->sc_wdcdev.nchannels; 6611 channel++) { 6612 cp = &sc->pciide_channels[channel]; 6613 sc->wdc_chanarray[channel] = &cp->wdc_channel; 6614 6615 cp->ih = sc->sc_pci_ih; 6616 cp->name = NULL; 6617 cp->wdc_channel.channel = channel; 6618 cp->wdc_channel.wdc = &sc->sc_wdcdev; 6619 cp->wdc_channel.ch_queue = 6620 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 6621 if (cp->wdc_channel.ch_queue == NULL) { 6622 printf("%s: channel %d: " 6623 "can't allocate memory for command queue\n", 6624 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6625 continue; 6626 } 6627 wdc_cp = &cp->wdc_channel; 6628 6629 ps->regs[channel].ctl_iot = ps->ba5_st; 6630 ps->regs[channel].cmd_iot = ps->ba5_st; 6631 6632 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6633 0x0238 + (channel << 7), 1, 6634 &ps->regs[channel].ctl_ioh) != 0) { 6635 printf("%s: couldn't map channel %d ctl regs\n", 6636 sc->sc_wdcdev.sc_dev.dv_xname, 6637 channel); 6638 continue; 6639 } 6640 for (i = 0; i < WDC_NREG; i++) { 6641 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6642 0x0200 + (i << 2) + (channel << 7), i == 0 ? 
4 : 1, 6643 &ps->regs[channel].cmd_iohs[i]) != 0) { 6644 printf("%s: couldn't map channel %d cmd " 6645 "regs\n", 6646 sc->sc_wdcdev.sc_dev.dv_xname, 6647 channel); 6648 continue; 6649 } 6650 } 6651 ps->regs[channel].cmd_iohs[wdr_status & _WDC_REGMASK] = 6652 ps->regs[channel].cmd_iohs[wdr_command & _WDC_REGMASK]; 6653 ps->regs[channel].cmd_iohs[wdr_features & _WDC_REGMASK] = 6654 ps->regs[channel].cmd_iohs[wdr_error & _WDC_REGMASK]; 6655 wdc_cp->data32iot = wdc_cp->cmd_iot = 6656 ps->regs[channel].cmd_iot; 6657 wdc_cp->data32ioh = wdc_cp->cmd_ioh = 6658 ps->regs[channel].cmd_iohs[0]; 6659 wdc_cp->_vtbl = &wdc_pdc203xx_vtbl; 6660 6661 /* 6662 * Subregion the busmaster registers. They're spread all over 6663 * the controller's register space :(. They are also 4 bytes 6664 * sized, with some specific extensions in the extra bits. 6665 * It also seems that the IDEDMA_CTL register isn't available. 6666 */ 6667 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6668 0x260 + (channel << 7), 1, 6669 &ps->regs[channel].dma_iohs[IDEDMA_CMD(0)]) != 0) { 6670 printf("%s channel %d: can't subregion DMA " 6671 "registers\n", 6672 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6673 continue; 6674 } 6675 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6676 0x244 + (channel << 7), 4, 6677 &ps->regs[channel].dma_iohs[IDEDMA_TBL(0)]) != 0) { 6678 printf("%s channel %d: can't subregion DMA " 6679 "registers\n", 6680 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6681 continue; 6682 } 6683 6684 wdcattach(wdc_cp); 6685 bus_space_write_4(sc->sc_dma_iot, 6686 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6687 (bus_space_read_4(sc->sc_dma_iot, 6688 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6689 0) & ~0x00003f9f) | (channel + 1)); 6690 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 6691 (channel + 1) << 2, 0x00000001); 6692 6693 pdc203xx_setup_channel(&cp->wdc_channel); 6694 } 6695 6696 printf("%s: using %s for native-PCI interrupt\n", 6697 sc->sc_wdcdev.sc_dev.dv_xname, 6698 intrstr ?
intrstr : "unknown interrupt"); 6699 } 6700 6701 void 6702 pdc203xx_setup_channel(struct channel_softc *chp) 6703 { 6704 struct ata_drive_datas *drvp; 6705 struct pciide_channel *cp = (struct pciide_channel *)chp; 6706 int drive, s; 6707 6708 pciide_channel_dma_setup(cp); 6709 6710 for (drive = 0; drive < 2; drive++) { 6711 drvp = &chp->ch_drive[drive]; 6712 if ((drvp->drive_flags & DRIVE) == 0) 6713 continue; 6714 if (drvp->drive_flags & DRIVE_UDMA) { 6715 s = splbio(); 6716 drvp->drive_flags &= ~DRIVE_DMA; 6717 splx(s); 6718 } 6719 } 6720 pciide_print_modes(cp); 6721 } 6722 6723 int 6724 pdc203xx_pci_intr(void *arg) 6725 { 6726 struct pciide_softc *sc = arg; 6727 struct pciide_channel *cp; 6728 struct channel_softc *wdc_cp; 6729 struct pciide_pdcsata *ps = sc->sc_cookie; 6730 int i, rv, crv; 6731 u_int32_t scr; 6732 6733 rv = 0; 6734 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x00040); 6735 6736 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6737 cp = &sc->pciide_channels[i]; 6738 wdc_cp = &cp->wdc_channel; 6739 if (scr & (1 << (i + 1))) { 6740 crv = wdcintr(wdc_cp); 6741 if (crv == 0) { 6742 printf("%s:%d: bogus intr (reg 0x%x)\n", 6743 sc->sc_wdcdev.sc_dev.dv_xname, 6744 i, scr); 6745 } else 6746 rv = 1; 6747 } 6748 } 6749 6750 return (rv); 6751 } 6752 6753 int 6754 pdc205xx_pci_intr(void *arg) 6755 { 6756 struct pciide_softc *sc = arg; 6757 struct pciide_channel *cp; 6758 struct channel_softc *wdc_cp; 6759 struct pciide_pdcsata *ps = sc->sc_cookie; 6760 int i, rv, crv; 6761 u_int32_t scr, status; 6762 6763 rv = 0; 6764 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x40); 6765 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x40, scr & 0x0000ffff); 6766 6767 status = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x60); 6768 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, status & 0x000000ff); 6769 6770 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6771 cp = &sc->pciide_channels[i]; 6772 wdc_cp = &cp->wdc_channel; 6773 if (scr & (1 << (i + 1))) { 6774 crv = wdcintr(wdc_cp); 6775 if (crv == 0) { 6776 printf("%s:%d: bogus intr (reg 0x%x)\n", 6777 sc->sc_wdcdev.sc_dev.dv_xname, 6778 i, scr); 6779 } else 6780 rv = 1; 6781 } 6782 } 6783 return rv; 6784 } 6785 6786 void 6787 pdc203xx_irqack(struct channel_softc *chp) 6788 { 6789 struct pciide_channel *cp = (struct pciide_channel *)chp; 6790 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6791 struct pciide_pdcsata *ps = sc->sc_cookie; 6792 int chan = chp->channel; 6793 6794 bus_space_write_4(sc->sc_dma_iot, 6795 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, 6796 (bus_space_read_4(sc->sc_dma_iot, 6797 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 6798 0) & ~0x00003f9f) | (chan + 1)); 6799 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 6800 (chan + 1) << 2, 0x00000001); 6801 } 6802 6803 void 6804 pdc203xx_dma_start(void *v, int channel, int drive) 6805 { 6806 struct pciide_softc *sc = v; 6807 struct pciide_channel *cp = &sc->pciide_channels[channel]; 6808 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 6809 struct pciide_pdcsata *ps = sc->sc_cookie; 6810 6811 /* Write table address */ 6812 bus_space_write_4(sc->sc_dma_iot, 6813 ps->regs[channel].dma_iohs[IDEDMA_TBL(0)], 0, 6814 dma_maps->dmamap_table->dm_segs[0].ds_addr); 6815 6816 /* Start DMA engine */ 6817 bus_space_write_4(sc->sc_dma_iot, 6818 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6819 (bus_space_read_4(sc->sc_dma_iot, 6820 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6821 0) & ~0xc0) | ((dma_maps->dma_flags & WDC_DMA_READ) ? 
0x80 : 0xc0)); 6822 } 6823 6824 int 6825 pdc203xx_dma_finish(void *v, int channel, int drive, int force) 6826 { 6827 struct pciide_softc *sc = v; 6828 struct pciide_channel *cp = &sc->pciide_channels[channel]; 6829 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 6830 struct pciide_pdcsata *ps = sc->sc_cookie; 6831 6832 /* Stop DMA channel */ 6833 bus_space_write_4(sc->sc_dma_iot, 6834 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6835 (bus_space_read_4(sc->sc_dma_iot, 6836 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6837 0) & ~0x80)); 6838 6839 /* Unload the map of the data buffer */ 6840 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 6841 dma_maps->dmamap_xfer->dm_mapsize, 6842 (dma_maps->dma_flags & WDC_DMA_READ) ? 6843 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 6844 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 6845 6846 return (0); 6847 } 6848 6849 u_int8_t 6850 pdc203xx_read_reg(struct channel_softc *chp, enum wdc_regs reg) 6851 { 6852 struct pciide_channel *cp = (struct pciide_channel *)chp; 6853 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6854 struct pciide_pdcsata *ps = sc->sc_cookie; 6855 u_int8_t val; 6856 6857 if (reg & _WDC_AUX) { 6858 return (bus_space_read_1(ps->regs[chp->channel].ctl_iot, 6859 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 6860 } else { 6861 val = bus_space_read_1(ps->regs[chp->channel].cmd_iot, 6862 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0); 6863 return (val); 6864 } 6865 } 6866 6867 void 6868 pdc203xx_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 6869 { 6870 struct pciide_channel *cp = (struct pciide_channel *)chp; 6871 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6872 struct pciide_pdcsata *ps = sc->sc_cookie; 6873 6874 if (reg & _WDC_AUX) 6875 bus_space_write_1(ps->regs[chp->channel].ctl_iot, 6876 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 6877 else 6878 bus_space_write_1(ps->regs[chp->channel].cmd_iot, 6879 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 6880 0, val); 6881 } 6882 6883 void 6884 pdc205xx_do_reset(struct channel_softc *chp) 6885 { 6886 struct pciide_channel *cp = (struct pciide_channel *)chp; 6887 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6888 struct pciide_pdcsata *ps = sc->sc_cookie; 6889 u_int32_t scontrol; 6890 6891 wdc_do_reset(chp); 6892 6893 /* reset SATA */ 6894 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 6895 SCONTROL_WRITE(ps, chp->channel, scontrol); 6896 delay(50*1000); 6897 6898 scontrol &= ~SControl_DET_INIT; 6899 SCONTROL_WRITE(ps, chp->channel, scontrol); 6900 delay(50*1000); 6901 } 6902 6903 void 6904 pdc205xx_drv_probe(struct channel_softc *chp) 6905 { 6906 struct pciide_channel *cp = (struct pciide_channel *)chp; 6907 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6908 struct pciide_pdcsata *ps = sc->sc_cookie; 6909 bus_space_handle_t *iohs; 6910 u_int32_t scontrol, sstatus; 6911 u_int16_t scnt, sn, cl, ch; 6912 int i, s; 6913 6914 /* XXX This should be done by other code. 
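 * (set up the per-drive back pointers before the PHY is touched)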
*/ 6915 for (i = 0; i < 2; i++) { 6916 chp->ch_drive[i].chnl_softc = chp; 6917 chp->ch_drive[i].drive = i; 6918 } 6919 6920 SCONTROL_WRITE(ps, chp->channel, 0); 6921 delay(50*1000); 6922 6923 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 6924 SCONTROL_WRITE(ps,chp->channel,scontrol); 6925 delay(50*1000); 6926 6927 scontrol &= ~SControl_DET_INIT; 6928 SCONTROL_WRITE(ps,chp->channel,scontrol); 6929 delay(50*1000); 6930 6931 sstatus = SSTATUS_READ(ps,chp->channel); 6932 6933 switch (sstatus & SStatus_DET_mask) { 6934 case SStatus_DET_NODEV: 6935 /* No Device; be silent. */ 6936 break; 6937 6938 case SStatus_DET_DEV_NE: 6939 printf("%s: port %d: device connected, but " 6940 "communication not established\n", 6941 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6942 break; 6943 6944 case SStatus_DET_OFFLINE: 6945 printf("%s: port %d: PHY offline\n", 6946 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6947 break; 6948 6949 case SStatus_DET_DEV: 6950 iohs = ps->regs[chp->channel].cmd_iohs; 6951 bus_space_write_1(chp->cmd_iot, iohs[wdr_sdh], 0, 6952 WDSD_IBM); 6953 delay(10); /* 400ns delay */ 6954 scnt = bus_space_read_2(chp->cmd_iot, iohs[wdr_seccnt], 0); 6955 sn = bus_space_read_2(chp->cmd_iot, iohs[wdr_sector], 0); 6956 cl = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_lo], 0); 6957 ch = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_hi], 0); 6958 #if 0 6959 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 6960 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 6961 scnt, sn, cl, ch); 6962 #endif 6963 /* 6964 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 6965 * cases we get wrong values here, so ignore it. 6966 */ 6967 s = splbio(); 6968 if (cl == 0x14 && ch == 0xeb) 6969 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 6970 else 6971 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 6972 splx(s); 6973 #if 0 6974 printf("%s: port %d: device present", 6975 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 6976 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 6977 case 1: 6978 printf(", speed: 1.5Gb/s"); 6979 break; 6980 case 2: 6981 printf(", speed: 3.0Gb/s"); 6982 break; 6983 } 6984 printf("\n"); 6985 #endif 6986 break; 6987 6988 default: 6989 printf("%s: port %d: unknown SStatus: 0x%08x\n", 6990 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 6991 } 6992 } 6993 6994 #ifdef notyet 6995 /* 6996 * Inline functions for accessing the timing registers of the 6997 * OPTi controller. 6998 * 6999 * These *MUST* disable interrupts as they need atomic access to 7000 * certain magic registers. Failure to adhere to this *will* 7001 * break things in subtle ways if the wdc registers are accessed 7002 * by an interrupt routine while this magic sequence is executing. 
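 * The sequence used below is: two 16-bit reads of the features
 * register, an 8-bit write of 0x03 to the sector count register to
 * expose the configuration registers, the access itself, then a
 * write of 0x83 to restore the normal task-file registers.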
7003 */ 7004 static __inline__ u_int8_t 7005 opti_read_config(struct channel_softc *chp, int reg) 7006 { 7007 u_int8_t rv; 7008 int s = splhigh(); 7009 7010 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7011 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7012 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7013 7014 /* Followed by an 8-bit write of 0x3 to register #2 */ 7015 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7016 7017 /* Now we can read the required register */ 7018 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 7019 7020 /* Restore the real registers */ 7021 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7022 7023 splx(s); 7024 7025 return (rv); 7026 } 7027 7028 static __inline__ void 7029 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 7030 { 7031 int s = splhigh(); 7032 7033 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7034 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7035 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7036 7037 /* Followed by an 8-bit write of 0x3 to register #2 */ 7038 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7039 7040 /* Now we can write the required register */ 7041 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 7042 7043 /* Restore the real registers */ 7044 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7045 7046 splx(s); 7047 } 7048 7049 void 7050 opti_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7051 { 7052 struct pciide_channel *cp; 7053 bus_size_t cmdsize, ctlsize; 7054 pcireg_t interface; 7055 u_int8_t init_ctrl; 7056 int channel; 7057 7058 printf(": DMA"); 7059 /* 7060 * XXXSCW: 7061 * There seem to be a couple of buggy revisions/implementations 7062 * of the OPTi pciide chipset. This kludge seems to fix one of 7063 * the reported problems (NetBSD PR/11644) but still fails for the 7064 * other (NetBSD PR/13151), although the latter may be due to other 7065 * issues too... 
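 * The kludge below simply leaves bus-master DMA disabled on
 * revisions <= 0x12.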
7066 */ 7067 if (sc->sc_rev <= 0x12) { 7068 printf(" (disabled)"); 7069 sc->sc_dma_ok = 0; 7070 sc->sc_wdcdev.cap = 0; 7071 } else { 7072 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32; 7073 pciide_mapreg_dma(sc, pa); 7074 } 7075 7076 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE; 7077 sc->sc_wdcdev.PIO_cap = 4; 7078 if (sc->sc_dma_ok) { 7079 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7080 sc->sc_wdcdev.irqack = pciide_irqack; 7081 sc->sc_wdcdev.DMA_cap = 2; 7082 } 7083 sc->sc_wdcdev.set_modes = opti_setup_channel; 7084 7085 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7086 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 7087 7088 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 7089 OPTI_REG_INIT_CONTROL); 7090 7091 interface = PCI_INTERFACE(pa->pa_class); 7092 7093 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 7094 7095 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7096 cp = &sc->pciide_channels[channel]; 7097 if (pciide_chansetup(sc, channel, interface) == 0) 7098 continue; 7099 if (channel == 1 && 7100 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 7101 printf("%s: %s ignored (disabled)\n", 7102 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7103 continue; 7104 } 7105 pciide_map_compat_intr(pa, cp, channel, interface); 7106 if (cp->hw_ok == 0) 7107 continue; 7108 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7109 pciide_pci_intr); 7110 if (cp->hw_ok == 0) { 7111 pciide_unmap_compat_intr(pa, cp, channel, interface); 7112 continue; 7113 } 7114 opti_setup_channel(&cp->wdc_channel); 7115 } 7116 } 7117 7118 void 7119 opti_setup_channel(struct channel_softc *chp) 7120 { 7121 struct ata_drive_datas *drvp; 7122 struct pciide_channel *cp = (struct pciide_channel *)chp; 7123 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7124 int drive, spd; 7125 int mode[2]; 7126 u_int8_t rv, mr; 7127 7128 /* 7129 * The `Delay' and `Address Setup Time' fields of the 7130 * Miscellaneous Register are always zero initially. 7131 */ 7132 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 7133 mr &= ~(OPTI_MISC_DELAY_MASK | 7134 OPTI_MISC_ADDR_SETUP_MASK | 7135 OPTI_MISC_INDEX_MASK); 7136 7137 /* Prime the control register before setting timing values */ 7138 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 7139 7140 /* Determine the clockrate of the PCIbus the chip is attached to */ 7141 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 7142 spd &= OPTI_STRAP_PCI_SPEED_MASK; 7143 7144 /* setup DMA if needed */ 7145 pciide_channel_dma_setup(cp); 7146 7147 for (drive = 0; drive < 2; drive++) { 7148 drvp = &chp->ch_drive[drive]; 7149 /* If no drive, skip */ 7150 if ((drvp->drive_flags & DRIVE) == 0) { 7151 mode[drive] = -1; 7152 continue; 7153 } 7154 7155 if ((drvp->drive_flags & DRIVE_DMA)) { 7156 /* 7157 * Timings will be used for both PIO and DMA, 7158 * so adjust DMA mode if needed 7159 */ 7160 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 7161 drvp->PIO_mode = drvp->DMA_mode + 2; 7162 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 7163 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 7164 drvp->PIO_mode - 2 : 0; 7165 if (drvp->DMA_mode == 0) 7166 drvp->PIO_mode = 0; 7167 7168 mode[drive] = drvp->DMA_mode + 5; 7169 } else 7170 mode[drive] = drvp->PIO_mode; 7171 7172 if (drive && mode[0] >= 0 && 7173 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 7174 /* 7175 * Can't have two drives using different values 7176 * for `Address Setup Time'. 7177 * Slow down the faster drive to compensate. 
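 * (copy the slower drive's mode over and knock the faster one
 * back to PIO)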
7178 			 */
7179 			int d = (opti_tim_as[spd][mode[0]] >
7180 			    opti_tim_as[spd][mode[1]]) ? 0 : 1;
7181 
7182 			mode[d] = mode[1-d];
7183 			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
7184 			chp->ch_drive[d].DMA_mode = 0;
7185 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
7186 		}
7187 	}
7188 
7189 	for (drive = 0; drive < 2; drive++) {
7190 		int m;
7191 		if ((m = mode[drive]) < 0)
7192 			continue;
7193 
7194 		/* Set the Address Setup Time and select appropriate index */
7195 		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
7196 		rv |= OPTI_MISC_INDEX(drive);
7197 		opti_write_config(chp, OPTI_REG_MISC, mr | rv);
7198 
7199 		/* Set the pulse width and recovery timing parameters */
7200 		rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
7201 		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
7202 		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
7203 		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
7204 
7205 		/* Set the Enhanced Mode register appropriately */
7206 		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
7207 		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
7208 		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
7209 		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
7210 	}
7211 
7212 	/* Finally, enable the timings */
7213 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
7214 
7215 	pciide_print_modes(cp);
7216 }
7217 #endif
7218 
7219 void
7220 serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
7221 {
7222 	struct pciide_channel *cp;
7223 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
7224 	pcitag_t pcib_tag;
7225 	int channel;
7226 	bus_size_t cmdsize, ctlsize;
7227 
7228 	printf(": DMA");
7229 	pciide_mapreg_dma(sc, pa);
7230 	printf("\n");
7231 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
7232 	    WDC_CAPABILITY_MODE;
7233 
7234 	if (sc->sc_dma_ok) {
7235 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
7236 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
7237 		sc->sc_wdcdev.irqack = pciide_irqack;
7238 	}
7239 	sc->sc_wdcdev.PIO_cap = 4;
7240 	sc->sc_wdcdev.DMA_cap = 2;
7241 	switch (sc->sc_pp->ide_product) {
7242 	case PCI_PRODUCT_RCC_OSB4_IDE:
7243 		sc->sc_wdcdev.UDMA_cap = 2;
7244 		break;
7245 	case PCI_PRODUCT_RCC_CSB5_IDE:
7246 		if (sc->sc_rev < 0x92)
7247 			sc->sc_wdcdev.UDMA_cap = 4;
7248 		else
7249 			sc->sc_wdcdev.UDMA_cap = 5;
7250 		break;
7251 	case PCI_PRODUCT_RCC_CSB6_IDE:
7252 		sc->sc_wdcdev.UDMA_cap = 4;
7253 		break;
7254 	case PCI_PRODUCT_RCC_CSB6_RAID_IDE:
7255 		sc->sc_wdcdev.UDMA_cap = 5;
7256 		break;
7257 	}
7258 
7259 	sc->sc_wdcdev.set_modes = serverworks_setup_channel;
7260 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
7261 	sc->sc_wdcdev.nchannels =
7262 	    (sc->sc_pp->ide_product == PCI_PRODUCT_RCC_CSB6_IDE ?
1 : 2); 7263 7264 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7265 cp = &sc->pciide_channels[channel]; 7266 if (pciide_chansetup(sc, channel, interface) == 0) 7267 continue; 7268 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7269 serverworks_pci_intr); 7270 if (cp->hw_ok == 0) 7271 return; 7272 pciide_map_compat_intr(pa, cp, channel, interface); 7273 if (cp->hw_ok == 0) 7274 return; 7275 serverworks_setup_channel(&cp->wdc_channel); 7276 } 7277 7278 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 7279 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 7280 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 7281 } 7282 7283 void 7284 serverworks_setup_channel(struct channel_softc *chp) 7285 { 7286 struct ata_drive_datas *drvp; 7287 struct pciide_channel *cp = (struct pciide_channel *)chp; 7288 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7289 int channel = chp->channel; 7290 int drive, unit; 7291 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 7292 u_int32_t idedma_ctl; 7293 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 7294 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 7295 7296 /* setup DMA if needed */ 7297 pciide_channel_dma_setup(cp); 7298 7299 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 7300 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 7301 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 7302 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 7303 7304 pio_time &= ~(0xffff << (16 * channel)); 7305 dma_time &= ~(0xffff << (16 * channel)); 7306 pio_mode &= ~(0xff << (8 * channel + 16)); 7307 udma_mode &= ~(0xff << (8 * channel + 16)); 7308 udma_mode &= ~(3 << (2 * channel)); 7309 7310 idedma_ctl = 0; 7311 7312 /* Per drive settings */ 7313 for (drive = 0; drive < 2; drive++) { 7314 drvp = &chp->ch_drive[drive]; 7315 /* If no drive, skip */ 7316 if ((drvp->drive_flags & DRIVE) == 0) 7317 continue; 7318 unit = drive + 2 * channel; 7319 /* add timing values, setup DMA if needed */ 7320 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 7321 pio_mode |= drvp->PIO_mode << (4 * unit + 16); 7322 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7323 (drvp->drive_flags & DRIVE_UDMA)) { 7324 /* use Ultra/DMA, check for 80-pin cable */ 7325 if (sc->sc_rev <= 0x92 && drvp->UDMA_mode > 2 && 7326 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 7327 PCI_SUBSYS_ID_REG)) & 7328 (1 << (14 + channel))) == 0) { 7329 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 7330 "cable not detected\n", drvp->drive_name, 7331 sc->sc_wdcdev.sc_dev.dv_xname, 7332 channel, drive), DEBUG_PROBE); 7333 drvp->UDMA_mode = 2; 7334 } 7335 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7336 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 7337 udma_mode |= 1 << unit; 7338 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7339 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7340 (drvp->drive_flags & DRIVE_DMA)) { 7341 /* use Multiword DMA */ 7342 drvp->drive_flags &= ~DRIVE_UDMA; 7343 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7344 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7345 } else { 7346 /* PIO only */ 7347 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7348 } 7349 } 7350 7351 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 7352 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 7353 if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE) 7354 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 7355 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 
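	/*
	 * Layout note added for clarity (not in the original source): the
	 * timing words written back above keep one byte per drive in each
	 * channel's 16-bit half of registers 0x40/0x44, the `unit ^ 1'
	 * shift putting drive 0 in the high byte of its half, while the
	 * mode registers at 0x48/0x54 use a nibble per unit starting at
	 * bit 16 and, for 0x54, a per-unit enable bit in the low bits.
	 */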
7356 7357 if (idedma_ctl != 0) { 7358 /* Add software bits in status register */ 7359 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7360 IDEDMA_CTL(channel), idedma_ctl); 7361 } 7362 pciide_print_modes(cp); 7363 } 7364 7365 int 7366 serverworks_pci_intr(void *arg) 7367 { 7368 struct pciide_softc *sc = arg; 7369 struct pciide_channel *cp; 7370 struct channel_softc *wdc_cp; 7371 int rv = 0; 7372 int dmastat, i, crv; 7373 7374 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7375 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7376 IDEDMA_CTL(i)); 7377 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 7378 IDEDMA_CTL_INTR) 7379 continue; 7380 cp = &sc->pciide_channels[i]; 7381 wdc_cp = &cp->wdc_channel; 7382 crv = wdcintr(wdc_cp); 7383 if (crv == 0) { 7384 printf("%s:%d: bogus intr\n", 7385 sc->sc_wdcdev.sc_dev.dv_xname, i); 7386 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7387 IDEDMA_CTL(i), dmastat); 7388 } else 7389 rv = 1; 7390 } 7391 return (rv); 7392 } 7393 7394 void 7395 svwsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7396 { 7397 struct pciide_channel *cp; 7398 pci_intr_handle_t intrhandle; 7399 const char *intrstr; 7400 int channel; 7401 struct pciide_svwsata *ss; 7402 7403 /* Allocate memory for private data */ 7404 sc->sc_cookie = malloc(sizeof(*ss), M_DEVBUF, M_NOWAIT | M_ZERO); 7405 ss = sc->sc_cookie; 7406 7407 /* The 4-port version has a dummy second function. */ 7408 if (pci_conf_read(sc->sc_pc, sc->sc_tag, 7409 PCI_MAPREG_START + 0x14) == 0) { 7410 printf("\n"); 7411 return; 7412 } 7413 7414 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 7415 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 7416 &ss->ba5_st, &ss->ba5_sh, NULL, NULL, 0) != 0) { 7417 printf(": unable to map BA5 register space\n"); 7418 return; 7419 } 7420 7421 printf(": DMA"); 7422 svwsata_mapreg_dma(sc, pa); 7423 printf("\n"); 7424 7425 if (sc->sc_dma_ok) { 7426 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 7427 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7428 sc->sc_wdcdev.irqack = pciide_irqack; 7429 } 7430 sc->sc_wdcdev.PIO_cap = 4; 7431 sc->sc_wdcdev.DMA_cap = 2; 7432 sc->sc_wdcdev.UDMA_cap = 6; 7433 7434 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7435 sc->sc_wdcdev.nchannels = 4; 7436 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7437 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7438 sc->sc_wdcdev.set_modes = sata_setup_channel; 7439 7440 /* We can use SControl and SStatus to probe for drives. */ 7441 sc->sc_wdcdev.drv_probe = svwsata_drv_probe; 7442 7443 /* Map and establish the interrupt handler. */ 7444 if(pci_intr_map(pa, &intrhandle) != 0) { 7445 printf("%s: couldn't map native-PCI interrupt\n", 7446 sc->sc_wdcdev.sc_dev.dv_xname); 7447 return; 7448 } 7449 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 7450 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 7451 pciide_pci_intr, sc, sc->sc_wdcdev.sc_dev.dv_xname); 7452 if (sc->sc_pci_ih != NULL) { 7453 printf("%s: using %s for native-PCI interrupt\n", 7454 sc->sc_wdcdev.sc_dev.dv_xname, 7455 intrstr ? 
intrstr : "unknown interrupt"); 7456 } else { 7457 printf("%s: couldn't establish native-PCI interrupt", 7458 sc->sc_wdcdev.sc_dev.dv_xname); 7459 if (intrstr != NULL) 7460 printf(" at %s", intrstr); 7461 printf("\n"); 7462 return; 7463 } 7464 7465 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7466 cp = &sc->pciide_channels[channel]; 7467 if (pciide_chansetup(sc, channel, 0) == 0) 7468 continue; 7469 svwsata_mapchan(cp); 7470 sata_setup_channel(&cp->wdc_channel); 7471 } 7472 } 7473 7474 void 7475 svwsata_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 7476 { 7477 struct pciide_svwsata *ss = sc->sc_cookie; 7478 7479 sc->sc_wdcdev.dma_arg = sc; 7480 sc->sc_wdcdev.dma_init = pciide_dma_init; 7481 sc->sc_wdcdev.dma_start = pciide_dma_start; 7482 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 7483 7484 /* XXX */ 7485 sc->sc_dma_iot = ss->ba5_st; 7486 sc->sc_dma_ioh = ss->ba5_sh; 7487 7488 sc->sc_dmacmd_read = svwsata_dmacmd_read; 7489 sc->sc_dmacmd_write = svwsata_dmacmd_write; 7490 sc->sc_dmactl_read = svwsata_dmactl_read; 7491 sc->sc_dmactl_write = svwsata_dmactl_write; 7492 sc->sc_dmatbl_write = svwsata_dmatbl_write; 7493 7494 /* DMA registers all set up! */ 7495 sc->sc_dmat = pa->pa_dmat; 7496 sc->sc_dma_ok = 1; 7497 } 7498 7499 u_int8_t 7500 svwsata_dmacmd_read(struct pciide_softc *sc, int chan) 7501 { 7502 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7503 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0))); 7504 } 7505 7506 void 7507 svwsata_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 7508 { 7509 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7510 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0), val); 7511 } 7512 7513 u_int8_t 7514 svwsata_dmactl_read(struct pciide_softc *sc, int chan) 7515 { 7516 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7517 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0))); 7518 } 7519 7520 void 7521 svwsata_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 7522 { 7523 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7524 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0), val); 7525 } 7526 7527 void 7528 svwsata_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 7529 { 7530 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 7531 (chan << 8) + SVWSATA_DMA + IDEDMA_TBL(0), val); 7532 } 7533 7534 void 7535 svwsata_mapchan(struct pciide_channel *cp) 7536 { 7537 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7538 struct channel_softc *wdc_cp = &cp->wdc_channel; 7539 struct pciide_svwsata *ss = sc->sc_cookie; 7540 7541 cp->compat = 0; 7542 cp->ih = sc->sc_pci_ih; 7543 7544 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7545 (wdc_cp->channel << 8) + SVWSATA_TF0, 7546 SVWSATA_TF8 - SVWSATA_TF0, &wdc_cp->cmd_ioh) != 0) { 7547 printf("%s: couldn't map %s cmd regs\n", 7548 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7549 return; 7550 } 7551 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7552 (wdc_cp->channel << 8) + SVWSATA_TF8, 4, 7553 &wdc_cp->ctl_ioh) != 0) { 7554 printf("%s: couldn't map %s ctl regs\n", 7555 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7556 return; 7557 } 7558 wdc_cp->cmd_iot = wdc_cp->ctl_iot = ss->ba5_st; 7559 wdc_cp->_vtbl = &wdc_svwsata_vtbl; 7560 wdcattach(wdc_cp); 7561 } 7562 7563 void 7564 svwsata_drv_probe(struct channel_softc *chp) 7565 { 7566 struct pciide_channel *cp = (struct pciide_channel *)chp; 7567 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7568 struct pciide_svwsata *ss = sc->sc_cookie; 7569 int channel = chp->channel; 
7570 uint32_t scontrol, sstatus; 7571 uint8_t scnt, sn, cl, ch; 7572 int i, s; 7573 7574 /* XXX This should be done by other code. */ 7575 for (i = 0; i < 2; i++) { 7576 chp->ch_drive[i].chnl_softc = chp; 7577 chp->ch_drive[i].drive = i; 7578 } 7579 7580 /* 7581 * Request communication initialization sequence, any speed. 7582 * Performing this is the equivalent of an ATA Reset. 7583 */ 7584 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 7585 7586 /* 7587 * XXX We don't yet support SATA power management; disable all 7588 * power management state transitions. 7589 */ 7590 scontrol |= SControl_IPM_NONE; 7591 7592 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7593 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7594 delay(50 * 1000); 7595 scontrol &= ~SControl_DET_INIT; 7596 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7597 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7598 delay(50 * 1000); 7599 7600 sstatus = bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7601 (channel << 8) + SVWSATA_SSTATUS); 7602 #if 0 7603 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 7604 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 7605 bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7606 (channel << 8) + SVWSATA_SSTATUS)); 7607 #endif 7608 switch (sstatus & SStatus_DET_mask) { 7609 case SStatus_DET_NODEV: 7610 /* No device; be silent. */ 7611 break; 7612 7613 case SStatus_DET_DEV_NE: 7614 printf("%s: port %d: device connected, but " 7615 "communication not established\n", 7616 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7617 break; 7618 7619 case SStatus_DET_OFFLINE: 7620 printf("%s: port %d: PHY offline\n", 7621 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7622 break; 7623 7624 case SStatus_DET_DEV: 7625 /* 7626 * XXX ATAPI detection doesn't currently work. Don't 7627 * XXX know why. But, it's not like the standard method 7628 * XXX can detect an ATAPI device connected via a SATA/PATA 7629 * XXX bridge, so at least this is no worse. --thorpej 7630 */ 7631 if (chp->_vtbl != NULL) 7632 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 7633 else 7634 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 7635 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 7636 delay(10); /* 400ns delay */ 7637 /* Save register contents. */ 7638 if (chp->_vtbl != NULL) { 7639 scnt = CHP_READ_REG(chp, wdr_seccnt); 7640 sn = CHP_READ_REG(chp, wdr_sector); 7641 cl = CHP_READ_REG(chp, wdr_cyl_lo); 7642 ch = CHP_READ_REG(chp, wdr_cyl_hi); 7643 } else { 7644 scnt = bus_space_read_1(chp->cmd_iot, 7645 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 7646 sn = bus_space_read_1(chp->cmd_iot, 7647 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 7648 cl = bus_space_read_1(chp->cmd_iot, 7649 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 7650 ch = bus_space_read_1(chp->cmd_iot, 7651 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 7652 } 7653 #if 0 7654 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7655 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7656 scnt, sn, cl, ch); 7657 #endif 7658 /* 7659 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7660 * cases we get wrong values here, so ignore it. 
7661 */ 7662 s = splbio(); 7663 if (cl == 0x14 && ch == 0xeb) 7664 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7665 else 7666 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7667 splx(s); 7668 7669 printf("%s: port %d: device present", 7670 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7671 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7672 case 1: 7673 printf(", speed: 1.5Gb/s"); 7674 break; 7675 case 2: 7676 printf(", speed: 3.0Gb/s"); 7677 break; 7678 } 7679 printf("\n"); 7680 break; 7681 7682 default: 7683 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7684 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7685 } 7686 } 7687 7688 u_int8_t 7689 svwsata_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7690 { 7691 if (reg & _WDC_AUX) { 7692 return (bus_space_read_4(chp->ctl_iot, chp->ctl_ioh, 7693 (reg & _WDC_REGMASK) << 2)); 7694 } else { 7695 return (bus_space_read_4(chp->cmd_iot, chp->cmd_ioh, 7696 (reg & _WDC_REGMASK) << 2)); 7697 } 7698 } 7699 7700 void 7701 svwsata_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7702 { 7703 if (reg & _WDC_AUX) { 7704 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7705 (reg & _WDC_REGMASK) << 2, val); 7706 } else { 7707 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7708 (reg & _WDC_REGMASK) << 2, val); 7709 } 7710 } 7711 7712 void 7713 svwsata_lba48_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int16_t val) 7714 { 7715 if (reg & _WDC_AUX) { 7716 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7717 (reg & _WDC_REGMASK) << 2, val); 7718 } else { 7719 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7720 (reg & _WDC_REGMASK) << 2, val); 7721 } 7722 } 7723 7724 #define ACARD_IS_850(sc) \ 7725 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 7726 7727 void 7728 acard_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7729 { 7730 struct pciide_channel *cp; 7731 int i; 7732 pcireg_t interface; 7733 bus_size_t cmdsize, ctlsize; 7734 7735 /* 7736 * when the chip is in native mode it identifies itself as a 7737 * 'misc mass storage'. Fake interface in this case. 
7738 */ 7739 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 7740 interface = PCI_INTERFACE(pa->pa_class); 7741 } else { 7742 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 7743 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 7744 } 7745 7746 printf(": DMA"); 7747 pciide_mapreg_dma(sc, pa); 7748 printf("\n"); 7749 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7750 WDC_CAPABILITY_MODE; 7751 7752 if (sc->sc_dma_ok) { 7753 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7754 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7755 sc->sc_wdcdev.irqack = pciide_irqack; 7756 } 7757 sc->sc_wdcdev.PIO_cap = 4; 7758 sc->sc_wdcdev.DMA_cap = 2; 7759 switch (sc->sc_pp->ide_product) { 7760 case PCI_PRODUCT_ACARD_ATP850U: 7761 sc->sc_wdcdev.UDMA_cap = 2; 7762 break; 7763 case PCI_PRODUCT_ACARD_ATP860: 7764 case PCI_PRODUCT_ACARD_ATP860A: 7765 sc->sc_wdcdev.UDMA_cap = 4; 7766 break; 7767 case PCI_PRODUCT_ACARD_ATP865A: 7768 case PCI_PRODUCT_ACARD_ATP865R: 7769 sc->sc_wdcdev.UDMA_cap = 6; 7770 break; 7771 } 7772 7773 sc->sc_wdcdev.set_modes = acard_setup_channel; 7774 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7775 sc->sc_wdcdev.nchannels = 2; 7776 7777 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7778 cp = &sc->pciide_channels[i]; 7779 if (pciide_chansetup(sc, i, interface) == 0) 7780 continue; 7781 if (interface & PCIIDE_INTERFACE_PCI(i)) { 7782 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 7783 &ctlsize, pciide_pci_intr); 7784 } else { 7785 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 7786 &cmdsize, &ctlsize); 7787 } 7788 if (cp->hw_ok == 0) 7789 return; 7790 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 7791 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 7792 wdcattach(&cp->wdc_channel); 7793 acard_setup_channel(&cp->wdc_channel); 7794 } 7795 if (!ACARD_IS_850(sc)) { 7796 u_int32_t reg; 7797 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 7798 reg &= ~ATP860_CTRL_INT; 7799 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 7800 } 7801 } 7802 7803 void 7804 acard_setup_channel(struct channel_softc *chp) 7805 { 7806 struct ata_drive_datas *drvp; 7807 struct pciide_channel *cp = (struct pciide_channel *)chp; 7808 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7809 int channel = chp->channel; 7810 int drive; 7811 u_int32_t idetime, udma_mode; 7812 u_int32_t idedma_ctl; 7813 7814 /* setup DMA if needed */ 7815 pciide_channel_dma_setup(cp); 7816 7817 if (ACARD_IS_850(sc)) { 7818 idetime = 0; 7819 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 7820 udma_mode &= ~ATP850_UDMA_MASK(channel); 7821 } else { 7822 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 7823 idetime &= ~ATP860_SETTIME_MASK(channel); 7824 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 7825 udma_mode &= ~ATP860_UDMA_MASK(channel); 7826 } 7827 7828 idedma_ctl = 0; 7829 7830 /* Per drive settings */ 7831 for (drive = 0; drive < 2; drive++) { 7832 drvp = &chp->ch_drive[drive]; 7833 /* If no drive, skip */ 7834 if ((drvp->drive_flags & DRIVE) == 0) 7835 continue; 7836 /* add timing values, setup DMA if needed */ 7837 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7838 (drvp->drive_flags & DRIVE_UDMA)) { 7839 /* use Ultra/DMA */ 7840 if (ACARD_IS_850(sc)) { 7841 idetime |= ATP850_SETTIME(drive, 7842 acard_act_udma[drvp->UDMA_mode], 7843 acard_rec_udma[drvp->UDMA_mode]); 7844 udma_mode |= ATP850_UDMA_MODE(channel, drive, 7845 acard_udma_conf[drvp->UDMA_mode]); 7846 } else { 7847 idetime |= 
ATP860_SETTIME(channel, drive, 7848 acard_act_udma[drvp->UDMA_mode], 7849 acard_rec_udma[drvp->UDMA_mode]); 7850 udma_mode |= ATP860_UDMA_MODE(channel, drive, 7851 acard_udma_conf[drvp->UDMA_mode]); 7852 } 7853 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7854 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7855 (drvp->drive_flags & DRIVE_DMA)) { 7856 /* use Multiword DMA */ 7857 drvp->drive_flags &= ~DRIVE_UDMA; 7858 if (ACARD_IS_850(sc)) { 7859 idetime |= ATP850_SETTIME(drive, 7860 acard_act_dma[drvp->DMA_mode], 7861 acard_rec_dma[drvp->DMA_mode]); 7862 } else { 7863 idetime |= ATP860_SETTIME(channel, drive, 7864 acard_act_dma[drvp->DMA_mode], 7865 acard_rec_dma[drvp->DMA_mode]); 7866 } 7867 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7868 } else { 7869 /* PIO only */ 7870 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7871 if (ACARD_IS_850(sc)) { 7872 idetime |= ATP850_SETTIME(drive, 7873 acard_act_pio[drvp->PIO_mode], 7874 acard_rec_pio[drvp->PIO_mode]); 7875 } else { 7876 idetime |= ATP860_SETTIME(channel, drive, 7877 acard_act_pio[drvp->PIO_mode], 7878 acard_rec_pio[drvp->PIO_mode]); 7879 } 7880 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 7881 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 7882 | ATP8x0_CTRL_EN(channel)); 7883 } 7884 } 7885 7886 if (idedma_ctl != 0) { 7887 /* Add software bits in status register */ 7888 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7889 IDEDMA_CTL(channel), idedma_ctl); 7890 } 7891 pciide_print_modes(cp); 7892 7893 if (ACARD_IS_850(sc)) { 7894 pci_conf_write(sc->sc_pc, sc->sc_tag, 7895 ATP850_IDETIME(channel), idetime); 7896 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 7897 } else { 7898 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 7899 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 7900 } 7901 } 7902 7903 void 7904 nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7905 { 7906 struct pciide_channel *cp; 7907 int channel; 7908 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 7909 bus_size_t cmdsize, ctlsize; 7910 u_int32_t conf; 7911 7912 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 7913 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 7914 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 7915 7916 printf(": DMA"); 7917 pciide_mapreg_dma(sc, pa); 7918 7919 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7920 WDC_CAPABILITY_MODE; 7921 if (sc->sc_dma_ok) { 7922 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7923 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7924 sc->sc_wdcdev.irqack = pciide_irqack; 7925 } 7926 sc->sc_wdcdev.PIO_cap = 4; 7927 sc->sc_wdcdev.DMA_cap = 2; 7928 switch (sc->sc_pp->ide_product) { 7929 case PCI_PRODUCT_NVIDIA_NFORCE_IDE: 7930 sc->sc_wdcdev.UDMA_cap = 5; 7931 break; 7932 default: 7933 sc->sc_wdcdev.UDMA_cap = 6; 7934 } 7935 sc->sc_wdcdev.set_modes = nforce_setup_channel; 7936 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7937 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 7938 7939 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 7940 7941 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7942 cp = &sc->pciide_channels[channel]; 7943 7944 if (pciide_chansetup(sc, channel, interface) == 0) 7945 continue; 7946 7947 if ((conf & NFORCE_CHAN_EN(channel)) == 0) { 7948 printf("%s: %s ignored (disabled)\n", 7949 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7950 continue; 7951 } 7952 7953 pciide_map_compat_intr(pa, cp, channel, interface); 7954 if (cp->hw_ok == 0) 7955 continue; 7956 
pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7957 nforce_pci_intr); 7958 if (cp->hw_ok == 0) { 7959 pciide_unmap_compat_intr(pa, cp, channel, interface); 7960 continue; 7961 } 7962 7963 if (pciide_chan_candisable(cp)) { 7964 conf &= ~NFORCE_CHAN_EN(channel); 7965 pciide_unmap_compat_intr(pa, cp, channel, interface); 7966 continue; 7967 } 7968 7969 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 7970 } 7971 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 7972 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 7973 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 7974 } 7975 7976 void 7977 nforce_setup_channel(struct channel_softc *chp) 7978 { 7979 struct ata_drive_datas *drvp; 7980 int drive, mode; 7981 u_int32_t idedma_ctl; 7982 struct pciide_channel *cp = (struct pciide_channel *)chp; 7983 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7984 int channel = chp->channel; 7985 u_int32_t conf, piodmatim, piotim, udmatim; 7986 7987 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 7988 piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM); 7989 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM); 7990 udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM); 7991 WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, " 7992 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 7993 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 7994 7995 /* Setup DMA if needed */ 7996 pciide_channel_dma_setup(cp); 7997 7998 /* Clear all bits for this channel */ 7999 idedma_ctl = 0; 8000 piodmatim &= ~NFORCE_PIODMATIM_MASK(channel); 8001 udmatim &= ~NFORCE_UDMATIM_MASK(channel); 8002 8003 /* Per channel settings */ 8004 for (drive = 0; drive < 2; drive++) { 8005 drvp = &chp->ch_drive[drive]; 8006 8007 /* If no drive, skip */ 8008 if ((drvp->drive_flags & DRIVE) == 0) 8009 continue; 8010 8011 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8012 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8013 /* Setup UltraDMA mode */ 8014 drvp->drive_flags &= ~DRIVE_DMA; 8015 8016 udmatim |= NFORCE_UDMATIM_SET(channel, drive, 8017 nforce_udma[drvp->UDMA_mode]) | 8018 NFORCE_UDMA_EN(channel, drive) | 8019 NFORCE_UDMA_ENM(channel, drive); 8020 8021 mode = drvp->PIO_mode; 8022 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8023 (drvp->drive_flags & DRIVE_DMA) != 0) { 8024 /* Setup multiword DMA mode */ 8025 drvp->drive_flags &= ~DRIVE_UDMA; 8026 8027 /* mode = min(pio, dma + 2) */ 8028 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8029 mode = drvp->PIO_mode; 8030 else 8031 mode = drvp->DMA_mode + 2; 8032 } else { 8033 mode = drvp->PIO_mode; 8034 goto pio; 8035 } 8036 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8037 8038 pio: 8039 /* Setup PIO mode */ 8040 if (mode <= 2) { 8041 drvp->DMA_mode = 0; 8042 drvp->PIO_mode = 0; 8043 mode = 0; 8044 } else { 8045 drvp->PIO_mode = mode; 8046 drvp->DMA_mode = mode - 2; 8047 } 8048 piodmatim |= NFORCE_PIODMATIM_SET(channel, drive, 8049 nforce_pio[mode]); 8050 } 8051 8052 if (idedma_ctl != 0) { 8053 /* Add software bits in status register */ 8054 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8055 IDEDMA_CTL(channel), idedma_ctl); 8056 } 8057 8058 WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, " 8059 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8060 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8061 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim); 8062 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim); 8063 8064 
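	/*
	 * Worked example of the PIO/DMA coupling above (illustrative
	 * only): a drive negotiated at PIO 4 with multiword DMA 2 keeps
	 * both, since min(4, 2 + 2) = 4; a drive at PIO 2 with DMA 0
	 * yields mode 2, which the `mode <= 2' branch collapses to
	 * PIO 0 / DMA 0.
	 */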
pciide_print_modes(cp); 8065 } 8066 8067 int 8068 nforce_pci_intr(void *arg) 8069 { 8070 struct pciide_softc *sc = arg; 8071 struct pciide_channel *cp; 8072 struct channel_softc *wdc_cp; 8073 int i, rv, crv; 8074 u_int32_t dmastat; 8075 8076 rv = 0; 8077 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 8078 cp = &sc->pciide_channels[i]; 8079 wdc_cp = &cp->wdc_channel; 8080 8081 /* Skip compat channel */ 8082 if (cp->compat) 8083 continue; 8084 8085 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8086 IDEDMA_CTL(i)); 8087 if ((dmastat & IDEDMA_CTL_INTR) == 0) 8088 continue; 8089 8090 crv = wdcintr(wdc_cp); 8091 if (crv == 0) 8092 printf("%s:%d: bogus intr\n", 8093 sc->sc_wdcdev.sc_dev.dv_xname, i); 8094 else 8095 rv = 1; 8096 } 8097 return (rv); 8098 } 8099 8100 void 8101 artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8102 { 8103 struct pciide_channel *cp; 8104 bus_size_t cmdsize, ctlsize; 8105 pcireg_t interface; 8106 int channel; 8107 8108 printf(": DMA"); 8109 #ifdef PCIIDE_I31244_DISABLEDMA 8110 if (sc->sc_rev == 0) { 8111 printf(" disabled due to rev. 0"); 8112 sc->sc_dma_ok = 0; 8113 } else 8114 #endif 8115 pciide_mapreg_dma(sc, pa); 8116 printf("\n"); 8117 8118 /* 8119 * XXX Configure LEDs to show activity. 8120 */ 8121 8122 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8123 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 8124 sc->sc_wdcdev.PIO_cap = 4; 8125 if (sc->sc_dma_ok) { 8126 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8127 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8128 sc->sc_wdcdev.irqack = pciide_irqack; 8129 sc->sc_wdcdev.DMA_cap = 2; 8130 sc->sc_wdcdev.UDMA_cap = 6; 8131 } 8132 sc->sc_wdcdev.set_modes = sata_setup_channel; 8133 8134 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8135 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8136 8137 interface = PCI_INTERFACE(pa->pa_class); 8138 8139 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8140 cp = &sc->pciide_channels[channel]; 8141 if (pciide_chansetup(sc, channel, interface) == 0) 8142 continue; 8143 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8144 pciide_pci_intr); 8145 if (cp->hw_ok == 0) 8146 continue; 8147 pciide_map_compat_intr(pa, cp, channel, interface); 8148 sata_setup_channel(&cp->wdc_channel); 8149 } 8150 } 8151 8152 void 8153 ite_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8154 { 8155 struct pciide_channel *cp; 8156 int channel; 8157 pcireg_t interface; 8158 bus_size_t cmdsize, ctlsize; 8159 pcireg_t cfg, modectl; 8160 8161 /* 8162 * Fake interface since IT8212F is claimed to be a ``RAID'' device. 
8163 */ 8164 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8165 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8166 8167 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8168 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8169 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8170 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8171 modectl & IT_MODE_MASK), DEBUG_PROBE); 8172 8173 printf(": DMA"); 8174 pciide_mapreg_dma(sc, pa); 8175 8176 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8177 WDC_CAPABILITY_MODE; 8178 if (sc->sc_dma_ok) { 8179 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8180 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8181 sc->sc_wdcdev.irqack = pciide_irqack; 8182 } 8183 sc->sc_wdcdev.PIO_cap = 4; 8184 sc->sc_wdcdev.DMA_cap = 2; 8185 sc->sc_wdcdev.UDMA_cap = 6; 8186 8187 sc->sc_wdcdev.set_modes = ite_setup_channel; 8188 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8189 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8190 8191 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8192 8193 /* Disable RAID */ 8194 modectl &= ~IT_MODE_RAID1; 8195 /* Disable CPU firmware mode */ 8196 modectl &= ~IT_MODE_CPU; 8197 8198 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8199 8200 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8201 cp = &sc->pciide_channels[channel]; 8202 8203 if (pciide_chansetup(sc, channel, interface) == 0) 8204 continue; 8205 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8206 pciide_pci_intr); 8207 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8208 } 8209 8210 /* Re-read configuration registers after channels setup */ 8211 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8212 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8213 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8214 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8215 modectl & IT_MODE_MASK), DEBUG_PROBE); 8216 } 8217 8218 void 8219 ite_setup_channel(struct channel_softc *chp) 8220 { 8221 struct ata_drive_datas *drvp; 8222 int drive, mode; 8223 u_int32_t idedma_ctl; 8224 struct pciide_channel *cp = (struct pciide_channel *)chp; 8225 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8226 int channel = chp->channel; 8227 pcireg_t cfg, modectl; 8228 pcireg_t tim; 8229 8230 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8231 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8232 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_TIM(channel)); 8233 WDCDEBUG_PRINT(("%s:%d: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8234 channel, tim), DEBUG_PROBE); 8235 8236 /* Setup DMA if needed */ 8237 pciide_channel_dma_setup(cp); 8238 8239 /* Clear all bits for this channel */ 8240 idedma_ctl = 0; 8241 8242 /* Per channel settings */ 8243 for (drive = 0; drive < 2; drive++) { 8244 drvp = &chp->ch_drive[drive]; 8245 8246 /* If no drive, skip */ 8247 if ((drvp->drive_flags & DRIVE) == 0) 8248 continue; 8249 8250 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8251 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8252 /* Setup UltraDMA mode */ 8253 drvp->drive_flags &= ~DRIVE_DMA; 8254 modectl &= ~IT_MODE_DMA(channel, drive); 8255 8256 #if 0 8257 /* Check cable, works only in CPU firmware mode */ 8258 if (drvp->UDMA_mode > 2 && 8259 (cfg & IT_CFG_CABLE(channel, drive)) == 0) { 8260 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 8261 "80-wire cable not detected\n", 8262 drvp->drive_name, 8263 sc->sc_wdcdev.sc_dev.dv_xname, 8264 channel, drive), DEBUG_PROBE); 8265 drvp->UDMA_mode = 2; 8266 } 8267 #endif 8268 
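			/*
			 * Editor's note: with the cable check above
			 * disabled, a drive asking for UDMA > 2 on a
			 * 40-wire cable is not downgraded here.  The
			 * IT_TIM_UDMA5() bit below presumably selects the
			 * faster timing set needed for UDMA modes 5 and 6.
			 */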
8269 if (drvp->UDMA_mode >= 5) 8270 tim |= IT_TIM_UDMA5(drive); 8271 else 8272 tim &= ~IT_TIM_UDMA5(drive); 8273 8274 mode = drvp->PIO_mode; 8275 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8276 (drvp->drive_flags & DRIVE_DMA) != 0) { 8277 /* Setup multiword DMA mode */ 8278 drvp->drive_flags &= ~DRIVE_UDMA; 8279 modectl |= IT_MODE_DMA(channel, drive); 8280 8281 /* mode = min(pio, dma + 2) */ 8282 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8283 mode = drvp->PIO_mode; 8284 else 8285 mode = drvp->DMA_mode + 2; 8286 } else { 8287 goto pio; 8288 } 8289 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8290 8291 pio: 8292 /* Setup PIO mode */ 8293 if (mode <= 2) { 8294 drvp->DMA_mode = 0; 8295 drvp->PIO_mode = 0; 8296 mode = 0; 8297 } else { 8298 drvp->PIO_mode = mode; 8299 drvp->DMA_mode = mode - 2; 8300 } 8301 8302 /* Enable IORDY if PIO mode >= 3 */ 8303 if (drvp->PIO_mode >= 3) 8304 cfg |= IT_CFG_IORDY(channel); 8305 } 8306 8307 WDCDEBUG_PRINT(("%s: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8308 tim), DEBUG_PROBE); 8309 8310 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_CFG, cfg); 8311 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8312 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_TIM(channel), tim); 8313 8314 if (idedma_ctl != 0) { 8315 /* Add software bits in status register */ 8316 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8317 IDEDMA_CTL(channel), idedma_ctl); 8318 } 8319 8320 pciide_print_modes(cp); 8321 } 8322 8323 void 8324 ixp_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8325 { 8326 struct pciide_channel *cp; 8327 int channel; 8328 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8329 bus_size_t cmdsize, ctlsize; 8330 8331 printf(": DMA"); 8332 pciide_mapreg_dma(sc, pa); 8333 8334 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8335 WDC_CAPABILITY_MODE; 8336 if (sc->sc_dma_ok) { 8337 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8338 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8339 sc->sc_wdcdev.irqack = pciide_irqack; 8340 } 8341 sc->sc_wdcdev.PIO_cap = 4; 8342 sc->sc_wdcdev.DMA_cap = 2; 8343 sc->sc_wdcdev.UDMA_cap = 6; 8344 8345 sc->sc_wdcdev.set_modes = ixp_setup_channel; 8346 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8347 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8348 8349 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8350 8351 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8352 cp = &sc->pciide_channels[channel]; 8353 if (pciide_chansetup(sc, channel, interface) == 0) 8354 continue; 8355 pciide_map_compat_intr(pa, cp, channel, interface); 8356 if (cp->hw_ok == 0) 8357 continue; 8358 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8359 pciide_pci_intr); 8360 if (cp->hw_ok == 0) { 8361 pciide_unmap_compat_intr(pa, cp, channel, interface); 8362 continue; 8363 } 8364 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8365 } 8366 } 8367 8368 void 8369 ixp_setup_channel(struct channel_softc *chp) 8370 { 8371 struct ata_drive_datas *drvp; 8372 int drive, mode; 8373 u_int32_t idedma_ctl; 8374 struct pciide_channel *cp = (struct pciide_channel*)chp; 8375 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8376 int channel = chp->channel; 8377 pcireg_t udma, mdma_timing, pio, pio_timing; 8378 8379 pio_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING); 8380 pio = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL); 8381 mdma_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING); 8382 udma = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL); 8383 
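	/*
	 * Note (added): the four IXP timing/control words are read once
	 * above, the per-channel/per-drive fields are rewritten below via
	 * the IXP_UDMA_*, IXP_SET_MODE and IXP_SET_TIMING macros, and the
	 * words are written back in one go after the drive loop, so the
	 * other channel's settings are preserved.
	 */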
8384 /* Setup DMA if needed */ 8385 pciide_channel_dma_setup(cp); 8386 8387 idedma_ctl = 0; 8388 8389 /* Per channel settings */ 8390 for (drive = 0; drive < 2; drive++) { 8391 drvp = &chp->ch_drive[drive]; 8392 8393 /* If no drive, skip */ 8394 if ((drvp->drive_flags & DRIVE) == 0) 8395 continue; 8396 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8397 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8398 /* Setup UltraDMA mode */ 8399 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8400 IXP_UDMA_ENABLE(udma, chp->channel, drive); 8401 IXP_SET_MODE(udma, chp->channel, drive, 8402 drvp->UDMA_mode); 8403 mode = drvp->PIO_mode; 8404 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8405 (drvp->drive_flags & DRIVE_DMA) != 0) { 8406 /* Setup multiword DMA mode */ 8407 drvp->drive_flags &= ~DRIVE_UDMA; 8408 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8409 IXP_UDMA_DISABLE(udma, chp->channel, drive); 8410 IXP_SET_TIMING(mdma_timing, chp->channel, drive, 8411 ixp_mdma_timings[drvp->DMA_mode]); 8412 8413 /* mode = min(pio, dma + 2) */ 8414 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8415 mode = drvp->PIO_mode; 8416 else 8417 mode = drvp->DMA_mode + 2; 8418 } else { 8419 mode = drvp->PIO_mode; 8420 } 8421 8422 /* Setup PIO mode */ 8423 drvp->PIO_mode = mode; 8424 if (mode < 2) 8425 drvp->DMA_mode = 0; 8426 else 8427 drvp->DMA_mode = mode - 2; 8428 /* 8429 * Set PIO mode and timings 8430 * Linux driver avoids PIO mode 1, let's do it too. 8431 */ 8432 if (drvp->PIO_mode == 1) 8433 drvp->PIO_mode = 0; 8434 8435 IXP_SET_MODE(pio, chp->channel, drive, drvp->PIO_mode); 8436 IXP_SET_TIMING(pio_timing, chp->channel, drive, 8437 ixp_pio_timings[drvp->PIO_mode]); 8438 } 8439 8440 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL, udma); 8441 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING, mdma_timing); 8442 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL, pio); 8443 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING, pio_timing); 8444 8445 if (idedma_ctl != 0) { 8446 /* Add software bits in status register */ 8447 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8448 IDEDMA_CTL(channel), idedma_ctl); 8449 } 8450 8451 pciide_print_modes(cp); 8452 } 8453 8454 void 8455 jmicron_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8456 { 8457 struct pciide_channel *cp; 8458 int channel; 8459 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8460 bus_size_t cmdsize, ctlsize; 8461 u_int32_t conf; 8462 8463 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8464 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8465 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8466 8467 printf(": DMA"); 8468 pciide_mapreg_dma(sc, pa); 8469 8470 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8471 WDC_CAPABILITY_MODE; 8472 if (sc->sc_dma_ok) { 8473 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8474 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8475 sc->sc_wdcdev.irqack = pciide_irqack; 8476 } 8477 sc->sc_wdcdev.PIO_cap = 4; 8478 sc->sc_wdcdev.DMA_cap = 2; 8479 sc->sc_wdcdev.UDMA_cap = 6; 8480 sc->sc_wdcdev.set_modes = jmicron_setup_channel; 8481 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8482 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8483 8484 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8485 8486 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8487 cp = &sc->pciide_channels[channel]; 8488 8489 if (pciide_chansetup(sc, channel, interface) == 0) 8490 continue; 8491 8492 #if 0 8493 if ((conf & JMICRON_CHAN_EN(channel)) == 0) { 8494 
printf("%s: %s ignored (disabled)\n", 8495 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8496 continue; 8497 } 8498 #endif 8499 8500 pciide_map_compat_intr(pa, cp, channel, interface); 8501 if (cp->hw_ok == 0) 8502 continue; 8503 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8504 pciide_pci_intr); 8505 if (cp->hw_ok == 0) { 8506 pciide_unmap_compat_intr(pa, cp, channel, interface); 8507 continue; 8508 } 8509 8510 if (pciide_chan_candisable(cp)) { 8511 conf &= ~JMICRON_CHAN_EN(channel); 8512 pciide_unmap_compat_intr(pa, cp, channel, interface); 8513 continue; 8514 } 8515 8516 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8517 } 8518 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8519 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8520 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 8521 } 8522 8523 void 8524 jmicron_setup_channel(struct channel_softc *chp) 8525 { 8526 struct ata_drive_datas *drvp; 8527 int drive, mode; 8528 u_int32_t idedma_ctl; 8529 struct pciide_channel *cp = (struct pciide_channel *)chp; 8530 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8531 int channel = chp->channel; 8532 u_int32_t conf; 8533 8534 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8535 8536 /* Setup DMA if needed */ 8537 pciide_channel_dma_setup(cp); 8538 8539 /* Clear all bits for this channel */ 8540 idedma_ctl = 0; 8541 8542 /* Per channel settings */ 8543 for (drive = 0; drive < 2; drive++) { 8544 drvp = &chp->ch_drive[drive]; 8545 8546 /* If no drive, skip */ 8547 if ((drvp->drive_flags & DRIVE) == 0) 8548 continue; 8549 8550 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8551 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8552 /* Setup UltraDMA mode */ 8553 drvp->drive_flags &= ~DRIVE_DMA; 8554 8555 /* see if cable is up to scratch */ 8556 if ((conf & JMICRON_CONF_40PIN) && 8557 (drvp->UDMA_mode > 2)) 8558 drvp->UDMA_mode = 2; 8559 8560 mode = drvp->PIO_mode; 8561 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8562 (drvp->drive_flags & DRIVE_DMA) != 0) { 8563 /* Setup multiword DMA mode */ 8564 drvp->drive_flags &= ~DRIVE_UDMA; 8565 8566 /* mode = min(pio, dma + 2) */ 8567 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8568 mode = drvp->PIO_mode; 8569 else 8570 mode = drvp->DMA_mode + 2; 8571 } else { 8572 mode = drvp->PIO_mode; 8573 goto pio; 8574 } 8575 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8576 8577 pio: 8578 /* Setup PIO mode */ 8579 if (mode <= 2) { 8580 drvp->DMA_mode = 0; 8581 drvp->PIO_mode = 0; 8582 } else { 8583 drvp->PIO_mode = mode; 8584 drvp->DMA_mode = mode - 2; 8585 } 8586 } 8587 8588 if (idedma_ctl != 0) { 8589 /* Add software bits in status register */ 8590 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8591 IDEDMA_CTL(channel), idedma_ctl); 8592 } 8593 8594 pciide_print_modes(cp); 8595 } 8596 8597 void 8598 phison_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8599 { 8600 struct pciide_channel *cp; 8601 int channel; 8602 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8603 bus_size_t cmdsize, ctlsize; 8604 u_int32_t conf; 8605 8606 sc->chip_unmap = default_chip_unmap; 8607 8608 printf(": DMA"); 8609 pciide_mapreg_dma(sc, pa); 8610 8611 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8612 WDC_CAPABILITY_MODE; 8613 if (sc->sc_dma_ok) { 8614 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8615 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8616 sc->sc_wdcdev.irqack = pciide_irqack; 8617 } 8618 sc->sc_wdcdev.PIO_cap = 4; 8619 sc->sc_wdcdev.DMA_cap = 2; 8620 
8620 	sc->sc_wdcdev.UDMA_cap = 5;
8621 	sc->sc_wdcdev.set_modes = phison_setup_channel;
8622 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
8623 	sc->sc_wdcdev.nchannels = 1;
8624 
8625 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
8626 
8627 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
8628 		cp = &sc->pciide_channels[channel];
8629 
8630 		if (pciide_chansetup(sc, channel, interface) == 0)
8631 			continue;
8632 
8633 		pciide_map_compat_intr(pa, cp, channel, interface);
8634 		if (cp->hw_ok == 0)
8635 			continue;
8636 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
8637 		    pciide_pci_intr);
8638 		if (cp->hw_ok == 0) {
8639 			pciide_unmap_compat_intr(pa, cp, channel, interface);
8640 			continue;
8641 		}
8642 
8643 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
8644 	}
8648 }
8649 
8650 void
8651 phison_setup_channel(struct channel_softc *chp)
8652 {
8653 	struct ata_drive_datas *drvp;
8654 	int drive, mode;
8655 	u_int32_t idedma_ctl;
8656 	struct pciide_channel *cp = (struct pciide_channel *)chp;
8657 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
8658 	int channel = chp->channel;
8659 
8660 	/* Setup DMA if needed */
8661 	pciide_channel_dma_setup(cp);
8662 
8663 	/* Clear all bits for this channel */
8664 	idedma_ctl = 0;
8665 
8666 	/* Per channel settings */
8667 	for (drive = 0; drive < 2; drive++) {
8668 		drvp = &chp->ch_drive[drive];
8669 
8670 		/* If no drive, skip */
8671 		if ((drvp->drive_flags & DRIVE) == 0)
8672 			continue;
8673 
8674 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 &&
8675 		    (drvp->drive_flags & DRIVE_UDMA) != 0) {
8676 			/* Setup UltraDMA mode */
8677 			drvp->drive_flags &= ~DRIVE_DMA;
8678 			mode = drvp->PIO_mode;
8679 		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 &&
8680 		    (drvp->drive_flags & DRIVE_DMA) != 0) {
8681 			/* Setup multiword DMA mode */
8682 			drvp->drive_flags &= ~DRIVE_UDMA;
8683 
8684 			/* mode = min(pio, dma + 2) */
8685 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
8686 				mode = drvp->PIO_mode;
8687 			else
8688 				mode = drvp->DMA_mode + 2;
8689 		} else {
8690 			mode = drvp->PIO_mode;
8691 			goto pio;
8692 		}
8693 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
8694 
8695 pio:
8696 		/* Setup PIO mode */
8697 		if (mode <= 2) {
8698 			drvp->DMA_mode = 0;
8699 			drvp->PIO_mode = 0;
8700 		} else {
8701 			drvp->PIO_mode = mode;
8702 			drvp->DMA_mode = mode - 2;
8703 		}
8704 	}
8705 
8706 	if (idedma_ctl != 0) {
8707 		/* Add software bits in status register */
8708 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
8709 		    IDEDMA_CTL(channel), idedma_ctl);
8710 	}
8711 
8712 	pciide_print_modes(cp);
8713 }
8714 
8715 void
8716 sch_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
8717 {
8718 	struct pciide_channel *cp;
8719 	int channel;
8720 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
8721 	bus_size_t cmdsize, ctlsize;
8722 
8723 	printf(": DMA");
8724 	pciide_mapreg_dma(sc, pa);
8725 
8726 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
8727 	    WDC_CAPABILITY_MODE;
8728 	if (sc->sc_dma_ok) {
8729 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
8730 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
8731 		sc->sc_wdcdev.irqack = pciide_irqack;
8732 	}
8733 	sc->sc_wdcdev.PIO_cap = 4;
8734 	sc->sc_wdcdev.DMA_cap = 2;
8735 	sc->sc_wdcdev.UDMA_cap = 6;
8736 	sc->sc_wdcdev.set_modes = sch_setup_channel;
8737 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
8738 	sc->sc_wdcdev.nchannels = 1;
8739 
8740
pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8741 8742 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8743 cp = &sc->pciide_channels[channel]; 8744 8745 if (pciide_chansetup(sc, channel, interface) == 0) 8746 continue; 8747 8748 pciide_map_compat_intr(pa, cp, channel, interface); 8749 if (cp->hw_ok == 0) 8750 continue; 8751 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8752 pciide_pci_intr); 8753 if (cp->hw_ok == 0) { 8754 pciide_unmap_compat_intr(pa, cp, channel, interface); 8755 continue; 8756 } 8757 8758 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8759 } 8760 } 8761 8762 void 8763 sch_setup_channel(struct channel_softc *chp) 8764 { 8765 struct ata_drive_datas *drvp; 8766 int drive, mode; 8767 u_int32_t tim, timaddr; 8768 struct pciide_channel *cp = (struct pciide_channel *)chp; 8769 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8770 8771 /* Setup DMA if needed */ 8772 pciide_channel_dma_setup(cp); 8773 8774 /* Per channel settings */ 8775 for (drive = 0; drive < 2; drive++) { 8776 drvp = &chp->ch_drive[drive]; 8777 8778 /* If no drive, skip */ 8779 if ((drvp->drive_flags & DRIVE) == 0) 8780 continue; 8781 8782 timaddr = (drive == 0) ? SCH_D0TIM : SCH_D1TIM; 8783 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, timaddr); 8784 tim &= ~SCH_TIM_MASK; 8785 8786 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8787 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8788 /* Setup UltraDMA mode */ 8789 drvp->drive_flags &= ~DRIVE_DMA; 8790 8791 mode = drvp->PIO_mode; 8792 tim |= (drvp->UDMA_mode << 16) | SCH_TIM_SYNCDMA; 8793 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8794 (drvp->drive_flags & DRIVE_DMA) != 0) { 8795 /* Setup multiword DMA mode */ 8796 drvp->drive_flags &= ~DRIVE_UDMA; 8797 8798 tim &= ~SCH_TIM_SYNCDMA; 8799 8800 /* mode = min(pio, dma + 2) */ 8801 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8802 mode = drvp->PIO_mode; 8803 else 8804 mode = drvp->DMA_mode + 2; 8805 } else { 8806 mode = drvp->PIO_mode; 8807 goto pio; 8808 } 8809 8810 pio: 8811 /* Setup PIO mode */ 8812 if (mode <= 2) { 8813 drvp->DMA_mode = 0; 8814 drvp->PIO_mode = 0; 8815 } else { 8816 drvp->PIO_mode = mode; 8817 drvp->DMA_mode = mode - 2; 8818 } 8819 tim |= (drvp->DMA_mode << 8) | (drvp->PIO_mode); 8820 pci_conf_write(sc->sc_pc, sc->sc_tag, timaddr, tim); 8821 } 8822 8823 pciide_print_modes(cp); 8824 } 8825
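/*
 * Editor's illustration: the per-drive loops in nforce_setup_channel(),
 * ite_setup_channel(), jmicron_setup_channel(), phison_setup_channel()
 * and sch_setup_channel() above all couple PIO and multiword DMA the
 * same way: the effective mode is min(PIO_mode, DMA_mode + 2), and
 * anything at or below mode 2 falls back to PIO 0 / DMA 0.  The disabled
 * fragment below restates that clamp on its own; the struct and names
 * are made up for illustration and are not part of the driver.
 */
#if 0
struct example_modes {
	int pio;	/* negotiated PIO mode, 0..4 */
	int dma;	/* negotiated multiword DMA mode, 0..2 */
};

static void
example_clamp_modes(struct example_modes *m)
{
	int mode;

	/* mode = min(pio, dma + 2) */
	if (m->pio <= m->dma + 2)
		mode = m->pio;
	else
		mode = m->dma + 2;

	if (mode <= 2) {
		/* too slow to be worth a DMA timing; drop to PIO 0 */
		m->pio = 0;
		m->dma = 0;
	} else {
		m->pio = mode;
		m->dma = mode - 2;
	}
}
#endif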