1 /* $OpenBSD: pciide.c,v 1.347 2014/07/13 23:19:51 sasano Exp $ */ 2 /* $NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $ */ 3 4 /* 5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 * 27 */ 28 29 /* 30 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions 34 * are met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce the above copyright 38 * notice, this list of conditions and the following disclaimer in the 39 * documentation and/or other materials provided with the distribution. 40 * 3. All advertising materials mentioning features or use of this software 41 * must display the following acknowledgement: 42 * This product includes software developed by Christopher G. Demetriou 43 * for the NetBSD Project. 44 * 4. The name of the author may not be used to endorse or promote products 45 * derived from this software without specific prior written permission 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 */ 58 59 /* 60 * PCI IDE controller driver. 61 * 62 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 63 * sys/dev/pci/ppb.c, revision 1.16). 
64 * 65 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 66 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 67 * 5/16/94" from the PCI SIG. 68 * 69 */ 70 71 #define DEBUG_DMA 0x01 72 #define DEBUG_XFERS 0x02 73 #define DEBUG_FUNCS 0x08 74 #define DEBUG_PROBE 0x10 75 76 #ifdef WDCDEBUG 77 #ifndef WDCDEBUG_PCIIDE_MASK 78 #define WDCDEBUG_PCIIDE_MASK 0x00 79 #endif 80 int wdcdebug_pciide_mask = WDCDEBUG_PCIIDE_MASK; 81 #define WDCDEBUG_PRINT(args, level) do { \ 82 if ((wdcdebug_pciide_mask & (level)) != 0) \ 83 printf args; \ 84 } while (0) 85 #else 86 #define WDCDEBUG_PRINT(args, level) 87 #endif 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/device.h> 91 #include <sys/malloc.h> 92 93 #include <machine/bus.h> 94 #include <machine/endian.h> 95 96 #include <dev/ata/atavar.h> 97 #include <dev/ata/satareg.h> 98 #include <dev/ic/wdcreg.h> 99 #include <dev/ic/wdcvar.h> 100 101 #include <dev/pci/pcireg.h> 102 #include <dev/pci/pcivar.h> 103 #include <dev/pci/pcidevs.h> 104 105 #include <dev/pci/pciidereg.h> 106 #include <dev/pci/pciidevar.h> 107 #include <dev/pci/pciide_piix_reg.h> 108 #include <dev/pci/pciide_amd_reg.h> 109 #include <dev/pci/pciide_apollo_reg.h> 110 #include <dev/pci/pciide_cmd_reg.h> 111 #include <dev/pci/pciide_sii3112_reg.h> 112 #include <dev/pci/pciide_cy693_reg.h> 113 #include <dev/pci/pciide_sis_reg.h> 114 #include <dev/pci/pciide_acer_reg.h> 115 #include <dev/pci/pciide_pdc202xx_reg.h> 116 #include <dev/pci/pciide_opti_reg.h> 117 #include <dev/pci/pciide_hpt_reg.h> 118 #include <dev/pci/pciide_acard_reg.h> 119 #include <dev/pci/pciide_natsemi_reg.h> 120 #include <dev/pci/pciide_nforce_reg.h> 121 #include <dev/pci/pciide_i31244_reg.h> 122 #include <dev/pci/pciide_ite_reg.h> 123 #include <dev/pci/pciide_ixp_reg.h> 124 #include <dev/pci/pciide_svwsata_reg.h> 125 #include <dev/pci/pciide_jmicron_reg.h> 126 #include <dev/pci/pciide_rdc_reg.h> 127 #include <dev/pci/cy82c693var.h> 128 129 /* functions for reading/writing 8-bit PCI registers */ 130 131 u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t, 132 int); 133 void pciide_pci_write(pci_chipset_tag_t, pcitag_t, 134 int, u_int8_t); 135 136 u_int8_t 137 pciide_pci_read(pci_chipset_tag_t pc, pcitag_t pa, int reg) 138 { 139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >> 140 ((reg & 0x03) * 8) & 0xff); 141 } 142 143 void 144 pciide_pci_write(pci_chipset_tag_t pc, pcitag_t pa, int reg, u_int8_t val) 145 { 146 pcireg_t pcival; 147 148 pcival = pci_conf_read(pc, pa, (reg & ~0x03)); 149 pcival &= ~(0xff << ((reg & 0x03) * 8)); 150 pcival |= (val << ((reg & 0x03) * 8)); 151 pci_conf_write(pc, pa, (reg & ~0x03), pcival); 152 } 153 154 void default_chip_map(struct pciide_softc *, struct pci_attach_args *); 155 156 void sata_chip_map(struct pciide_softc *, struct pci_attach_args *); 157 void sata_setup_channel(struct channel_softc *); 158 159 void piix_chip_map(struct pciide_softc *, struct pci_attach_args *); 160 void piixsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 161 void piix_setup_channel(struct channel_softc *); 162 void piix3_4_setup_channel(struct channel_softc *); 163 void piix_timing_debug(struct pciide_softc *); 164 165 u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t); 166 u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas *); 167 u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t); 168 169 void amd756_chip_map(struct pciide_softc *, struct pci_attach_args *); 170 void amd756_setup_channel(struct channel_softc 
*); 171 172 void apollo_chip_map(struct pciide_softc *, struct pci_attach_args *); 173 void apollo_setup_channel(struct channel_softc *); 174 175 void cmd_chip_map(struct pciide_softc *, struct pci_attach_args *); 176 void cmd0643_9_chip_map(struct pciide_softc *, struct pci_attach_args *); 177 void cmd0643_9_setup_channel(struct channel_softc *); 178 void cmd680_chip_map(struct pciide_softc *, struct pci_attach_args *); 179 void cmd680_setup_channel(struct channel_softc *); 180 void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int); 181 void cmd_channel_map(struct pci_attach_args *, 182 struct pciide_softc *, int); 183 int cmd_pci_intr(void *); 184 void cmd646_9_irqack(struct channel_softc *); 185 186 void sii_fixup_cacheline(struct pciide_softc *, struct pci_attach_args *); 187 void sii3112_chip_map(struct pciide_softc *, struct pci_attach_args *); 188 void sii3112_setup_channel(struct channel_softc *); 189 void sii3112_drv_probe(struct channel_softc *); 190 void sii3114_chip_map(struct pciide_softc *, struct pci_attach_args *); 191 void sii3114_mapreg_dma(struct pciide_softc *, struct pci_attach_args *); 192 int sii3114_chansetup(struct pciide_softc *, int); 193 void sii3114_mapchan(struct pciide_channel *); 194 u_int8_t sii3114_dmacmd_read(struct pciide_softc *, int); 195 void sii3114_dmacmd_write(struct pciide_softc *, int, u_int8_t); 196 u_int8_t sii3114_dmactl_read(struct pciide_softc *, int); 197 void sii3114_dmactl_write(struct pciide_softc *, int, u_int8_t); 198 void sii3114_dmatbl_write(struct pciide_softc *, int, u_int32_t); 199 200 void cy693_chip_map(struct pciide_softc *, struct pci_attach_args *); 201 void cy693_setup_channel(struct channel_softc *); 202 203 void sis_chip_map(struct pciide_softc *, struct pci_attach_args *); 204 void sis_setup_channel(struct channel_softc *); 205 void sis96x_setup_channel(struct channel_softc *); 206 int sis_hostbr_match(struct pci_attach_args *); 207 int sis_south_match(struct pci_attach_args *); 208 209 void natsemi_chip_map(struct pciide_softc *, struct pci_attach_args *); 210 void natsemi_setup_channel(struct channel_softc *); 211 int natsemi_pci_intr(void *); 212 void natsemi_irqack(struct channel_softc *); 213 void ns_scx200_chip_map(struct pciide_softc *, struct pci_attach_args *); 214 void ns_scx200_setup_channel(struct channel_softc *); 215 216 void acer_chip_map(struct pciide_softc *, struct pci_attach_args *); 217 void acer_setup_channel(struct channel_softc *); 218 int acer_pci_intr(void *); 219 int acer_dma_init(void *, int, int, void *, size_t, int); 220 221 void pdc202xx_chip_map(struct pciide_softc *, struct pci_attach_args *); 222 void pdc202xx_setup_channel(struct channel_softc *); 223 void pdc20268_setup_channel(struct channel_softc *); 224 int pdc202xx_pci_intr(void *); 225 int pdc20265_pci_intr(void *); 226 void pdc20262_dma_start(void *, int, int); 227 int pdc20262_dma_finish(void *, int, int, int); 228 229 u_int8_t pdc268_config_read(struct channel_softc *, int); 230 231 void pdcsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 232 void pdc203xx_setup_channel(struct channel_softc *); 233 int pdc203xx_pci_intr(void *); 234 void pdc203xx_irqack(struct channel_softc *); 235 void pdc203xx_dma_start(void *,int ,int); 236 int pdc203xx_dma_finish(void *, int, int, int); 237 int pdc205xx_pci_intr(void *); 238 void pdc205xx_do_reset(struct channel_softc *); 239 void pdc205xx_drv_probe(struct channel_softc *); 240 241 void opti_chip_map(struct pciide_softc *, struct pci_attach_args *); 242 
void opti_setup_channel(struct channel_softc *); 243 244 void hpt_chip_map(struct pciide_softc *, struct pci_attach_args *); 245 void hpt_setup_channel(struct channel_softc *); 246 int hpt_pci_intr(void *); 247 248 void acard_chip_map(struct pciide_softc *, struct pci_attach_args *); 249 void acard_setup_channel(struct channel_softc *); 250 251 void serverworks_chip_map(struct pciide_softc *, struct pci_attach_args *); 252 void serverworks_setup_channel(struct channel_softc *); 253 int serverworks_pci_intr(void *); 254 255 void svwsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 256 void svwsata_mapreg_dma(struct pciide_softc *, struct pci_attach_args *); 257 void svwsata_mapchan(struct pciide_channel *); 258 u_int8_t svwsata_dmacmd_read(struct pciide_softc *, int); 259 void svwsata_dmacmd_write(struct pciide_softc *, int, u_int8_t); 260 u_int8_t svwsata_dmactl_read(struct pciide_softc *, int); 261 void svwsata_dmactl_write(struct pciide_softc *, int, u_int8_t); 262 void svwsata_dmatbl_write(struct pciide_softc *, int, u_int32_t); 263 void svwsata_drv_probe(struct channel_softc *); 264 265 void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *); 266 void nforce_setup_channel(struct channel_softc *); 267 int nforce_pci_intr(void *); 268 269 void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *); 270 271 void ite_chip_map(struct pciide_softc *, struct pci_attach_args *); 272 void ite_setup_channel(struct channel_softc *); 273 274 void ixp_chip_map(struct pciide_softc *, struct pci_attach_args *); 275 void ixp_setup_channel(struct channel_softc *); 276 277 void jmicron_chip_map(struct pciide_softc *, struct pci_attach_args *); 278 void jmicron_setup_channel(struct channel_softc *); 279 280 void phison_chip_map(struct pciide_softc *, struct pci_attach_args *); 281 void phison_setup_channel(struct channel_softc *); 282 283 void sch_chip_map(struct pciide_softc *, struct pci_attach_args *); 284 void sch_setup_channel(struct channel_softc *); 285 286 void rdc_chip_map(struct pciide_softc *, struct pci_attach_args *); 287 void rdc_setup_channel(struct channel_softc *); 288 289 struct pciide_product_desc { 290 u_int32_t ide_product; 291 u_short ide_flags; 292 /* map and setup chip, probe drives */ 293 void (*chip_map)(struct pciide_softc *, struct pci_attach_args *); 294 }; 295 296 /* Flags for ide_flags */ 297 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 298 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 299 300 /* Default product description for devices not known from this controller */ 301 const struct pciide_product_desc default_product_desc = { 302 0, /* Generic PCI IDE controller */ 303 0, 304 default_chip_map 305 }; 306 307 const struct pciide_product_desc pciide_intel_products[] = { 308 { PCI_PRODUCT_INTEL_31244, /* Intel 31244 SATA */ 309 0, 310 artisea_chip_map 311 }, 312 { PCI_PRODUCT_INTEL_82092AA, /* Intel 82092AA IDE */ 313 0, 314 default_chip_map 315 }, 316 { PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */ 317 0, 318 piix_chip_map 319 }, 320 { PCI_PRODUCT_INTEL_82371FB_ISA, /* Intel 82371FB IDE (PIIX) */ 321 0, 322 piix_chip_map 323 }, 324 { PCI_PRODUCT_INTEL_82372FB_IDE, /* Intel 82372FB IDE (PIIX4) */ 325 0, 326 piix_chip_map 327 }, 328 { PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */ 329 0, 330 piix_chip_map 331 }, 332 { PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */ 333 0, 334 piix_chip_map 335 }, 336 { PCI_PRODUCT_INTEL_82371MX, /* 
Intel 82371MX IDE */ 337 0, 338 piix_chip_map 339 }, 340 { PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */ 341 0, 342 piix_chip_map 343 }, 344 { PCI_PRODUCT_INTEL_82451NX, /* Intel 82451NX (PIIX4) IDE */ 345 0, 346 piix_chip_map 347 }, 348 { PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */ 349 0, 350 piix_chip_map 351 }, 352 { PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */ 353 0, 354 piix_chip_map 355 }, 356 { PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */ 357 0, 358 piix_chip_map 359 }, 360 { PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */ 361 0, 362 piix_chip_map 363 }, 364 { PCI_PRODUCT_INTEL_82801CAM_IDE, /* Intel 82801CAM IDE (ICH3) */ 365 0, 366 piix_chip_map 367 }, 368 { PCI_PRODUCT_INTEL_82801CA_IDE, /* Intel 82801CA IDE (ICH3) */ 369 0, 370 piix_chip_map 371 }, 372 { PCI_PRODUCT_INTEL_82801DB_IDE, /* Intel 82801DB IDE (ICH4) */ 373 0, 374 piix_chip_map 375 }, 376 { PCI_PRODUCT_INTEL_82801DBL_IDE, /* Intel 82801DBL IDE (ICH4-L) */ 377 0, 378 piix_chip_map 379 }, 380 { PCI_PRODUCT_INTEL_82801DBM_IDE, /* Intel 82801DBM IDE (ICH4-M) */ 381 0, 382 piix_chip_map 383 }, 384 { PCI_PRODUCT_INTEL_82801EB_IDE, /* Intel 82801EB/ER (ICH5/5R) IDE */ 385 0, 386 piix_chip_map 387 }, 388 { PCI_PRODUCT_INTEL_82801EB_SATA, /* Intel 82801EB (ICH5) SATA */ 389 0, 390 piixsata_chip_map 391 }, 392 { PCI_PRODUCT_INTEL_82801ER_SATA, /* Intel 82801ER (ICH5R) SATA */ 393 0, 394 piixsata_chip_map 395 }, 396 { PCI_PRODUCT_INTEL_6300ESB_IDE, /* Intel 6300ESB IDE */ 397 0, 398 piix_chip_map 399 }, 400 { PCI_PRODUCT_INTEL_6300ESB_SATA, /* Intel 6300ESB SATA */ 401 0, 402 piixsata_chip_map 403 }, 404 { PCI_PRODUCT_INTEL_6300ESB_SATA2, /* Intel 6300ESB SATA */ 405 0, 406 piixsata_chip_map 407 }, 408 { PCI_PRODUCT_INTEL_6321ESB_IDE, /* Intel 6321ESB IDE */ 409 0, 410 piix_chip_map 411 }, 412 { PCI_PRODUCT_INTEL_82801FB_IDE, /* Intel 82801FB (ICH6) IDE */ 413 0, 414 piix_chip_map 415 }, 416 { PCI_PRODUCT_INTEL_82801FBM_SATA, /* Intel 82801FBM (ICH6M) SATA */ 417 0, 418 piixsata_chip_map 419 }, 420 { PCI_PRODUCT_INTEL_82801FB_SATA, /* Intel 82801FB (ICH6) SATA */ 421 0, 422 piixsata_chip_map 423 }, 424 { PCI_PRODUCT_INTEL_82801FR_SATA, /* Intel 82801FR (ICH6R) SATA */ 425 0, 426 piixsata_chip_map 427 }, 428 { PCI_PRODUCT_INTEL_82801GB_IDE, /* Intel 82801GB (ICH7) IDE */ 429 0, 430 piix_chip_map 431 }, 432 { PCI_PRODUCT_INTEL_82801GB_SATA, /* Intel 82801GB (ICH7) SATA */ 433 0, 434 piixsata_chip_map 435 }, 436 { PCI_PRODUCT_INTEL_82801GR_AHCI, /* Intel 82801GR (ICH7R) AHCI */ 437 0, 438 piixsata_chip_map 439 }, 440 { PCI_PRODUCT_INTEL_82801GR_RAID, /* Intel 82801GR (ICH7R) RAID */ 441 0, 442 piixsata_chip_map 443 }, 444 { PCI_PRODUCT_INTEL_82801GBM_SATA, /* Intel 82801GBM (ICH7M) SATA */ 445 0, 446 piixsata_chip_map 447 }, 448 { PCI_PRODUCT_INTEL_82801GBM_AHCI, /* Intel 82801GBM (ICH7M) AHCI */ 449 0, 450 piixsata_chip_map 451 }, 452 { PCI_PRODUCT_INTEL_82801GHM_RAID, /* Intel 82801GHM (ICH7M DH) RAID */ 453 0, 454 piixsata_chip_map 455 }, 456 { PCI_PRODUCT_INTEL_82801H_SATA_1, /* Intel 82801H (ICH8) SATA */ 457 0, 458 piixsata_chip_map 459 }, 460 { PCI_PRODUCT_INTEL_82801H_AHCI_6P, /* Intel 82801H (ICH8) AHCI */ 461 0, 462 piixsata_chip_map 463 }, 464 { PCI_PRODUCT_INTEL_82801H_RAID, /* Intel 82801H (ICH8) RAID */ 465 0, 466 piixsata_chip_map 467 }, 468 { PCI_PRODUCT_INTEL_82801H_AHCI_4P, /* Intel 82801H (ICH8) AHCI */ 469 0, 470 piixsata_chip_map 471 }, 472 { PCI_PRODUCT_INTEL_82801H_SATA_2, /* Intel 82801H (ICH8) SATA */ 473 0, 474 piixsata_chip_map 
475 }, 476 { PCI_PRODUCT_INTEL_82801HBM_SATA, /* Intel 82801HBM (ICH8M) SATA */ 477 0, 478 piixsata_chip_map 479 }, 480 { PCI_PRODUCT_INTEL_82801HBM_AHCI, /* Intel 82801HBM (ICH8M) AHCI */ 481 0, 482 piixsata_chip_map 483 }, 484 { PCI_PRODUCT_INTEL_82801HBM_RAID, /* Intel 82801HBM (ICH8M) RAID */ 485 0, 486 piixsata_chip_map 487 }, 488 { PCI_PRODUCT_INTEL_82801HBM_IDE, /* Intel 82801HBM (ICH8M) IDE */ 489 0, 490 piix_chip_map 491 }, 492 { PCI_PRODUCT_INTEL_82801I_SATA_1, /* Intel 82801I (ICH9) SATA */ 493 0, 494 piixsata_chip_map 495 }, 496 { PCI_PRODUCT_INTEL_82801I_SATA_2, /* Intel 82801I (ICH9) SATA */ 497 0, 498 piixsata_chip_map 499 }, 500 { PCI_PRODUCT_INTEL_82801I_SATA_3, /* Intel 82801I (ICH9) SATA */ 501 0, 502 piixsata_chip_map 503 }, 504 { PCI_PRODUCT_INTEL_82801I_SATA_4, /* Intel 82801I (ICH9) SATA */ 505 0, 506 piixsata_chip_map 507 }, 508 { PCI_PRODUCT_INTEL_82801I_SATA_5, /* Intel 82801I (ICH9M) SATA */ 509 0, 510 piixsata_chip_map 511 }, 512 { PCI_PRODUCT_INTEL_82801I_SATA_6, /* Intel 82801I (ICH9M) SATA */ 513 0, 514 piixsata_chip_map 515 }, 516 { PCI_PRODUCT_INTEL_82801JD_SATA_1, /* Intel 82801JD (ICH10) SATA */ 517 0, 518 piixsata_chip_map 519 }, 520 { PCI_PRODUCT_INTEL_82801JD_SATA_2, /* Intel 82801JD (ICH10) SATA */ 521 0, 522 piixsata_chip_map 523 }, 524 { PCI_PRODUCT_INTEL_82801JI_SATA_1, /* Intel 82801JI (ICH10) SATA */ 525 0, 526 piixsata_chip_map 527 }, 528 { PCI_PRODUCT_INTEL_82801JI_SATA_2, /* Intel 82801JI (ICH10) SATA */ 529 0, 530 piixsata_chip_map 531 }, 532 { PCI_PRODUCT_INTEL_6321ESB_SATA, /* Intel 6321ESB SATA */ 533 0, 534 piixsata_chip_map 535 }, 536 { PCI_PRODUCT_INTEL_3400_SATA_1, /* Intel 3400 SATA */ 537 0, 538 piixsata_chip_map 539 }, 540 { PCI_PRODUCT_INTEL_3400_SATA_2, /* Intel 3400 SATA */ 541 0, 542 piixsata_chip_map 543 }, 544 { PCI_PRODUCT_INTEL_3400_SATA_3, /* Intel 3400 SATA */ 545 0, 546 piixsata_chip_map 547 }, 548 { PCI_PRODUCT_INTEL_3400_SATA_4, /* Intel 3400 SATA */ 549 0, 550 piixsata_chip_map 551 }, 552 { PCI_PRODUCT_INTEL_3400_SATA_5, /* Intel 3400 SATA */ 553 0, 554 piixsata_chip_map 555 }, 556 { PCI_PRODUCT_INTEL_3400_SATA_6, /* Intel 3400 SATA */ 557 0, 558 piixsata_chip_map 559 }, 560 { PCI_PRODUCT_INTEL_C600_SATA, /* Intel C600 SATA */ 561 0, 562 piixsata_chip_map 563 }, 564 { PCI_PRODUCT_INTEL_6SERIES_SATA_1, /* Intel 6 Series SATA */ 565 0, 566 piixsata_chip_map 567 }, 568 { PCI_PRODUCT_INTEL_6SERIES_SATA_2, /* Intel 6 Series SATA */ 569 0, 570 piixsata_chip_map 571 }, 572 { PCI_PRODUCT_INTEL_6SERIES_SATA_3, /* Intel 6 Series SATA */ 573 0, 574 piixsata_chip_map 575 }, 576 { PCI_PRODUCT_INTEL_6SERIES_SATA_4, /* Intel 6 Series SATA */ 577 0, 578 piixsata_chip_map 579 }, 580 { PCI_PRODUCT_INTEL_7SERIES_SATA_1, /* Intel 7 Series SATA */ 581 0, 582 piixsata_chip_map 583 }, 584 { PCI_PRODUCT_INTEL_7SERIES_SATA_2, /* Intel 7 Series SATA */ 585 0, 586 piixsata_chip_map 587 }, 588 { PCI_PRODUCT_INTEL_7SERIES_SATA_3, /* Intel 7 Series SATA */ 589 0, 590 piixsata_chip_map 591 }, 592 { PCI_PRODUCT_INTEL_7SERIES_SATA_4, /* Intel 7 Series SATA */ 593 0, 594 piixsata_chip_map 595 }, 596 { PCI_PRODUCT_INTEL_8SERIES_SATA_1, /* Intel 8 Series SATA */ 597 0, 598 piixsata_chip_map 599 }, 600 { PCI_PRODUCT_INTEL_8SERIES_SATA_2, /* Intel 8 Series SATA */ 601 0, 602 piixsata_chip_map 603 }, 604 { PCI_PRODUCT_INTEL_8SERIES_SATA_3, /* Intel 8 Series SATA */ 605 0, 606 piixsata_chip_map 607 }, 608 { PCI_PRODUCT_INTEL_8SERIES_SATA_4, /* Intel 8 Series SATA */ 609 0, 610 piixsata_chip_map 611 }, 612 { PCI_PRODUCT_INTEL_8SERIES_LP_SATA_1, /* Intel 8 
Series SATA */ 613 0, 614 piixsata_chip_map 615 }, 616 { PCI_PRODUCT_INTEL_8SERIES_LP_SATA_2, /* Intel 8 Series SATA */ 617 0, 618 piixsata_chip_map 619 }, 620 { PCI_PRODUCT_INTEL_8SERIES_LP_SATA_3, /* Intel 8 Series SATA */ 621 0, 622 piixsata_chip_map 623 }, 624 { PCI_PRODUCT_INTEL_8SERIES_LP_SATA_4, /* Intel 8 Series SATA */ 625 0, 626 piixsata_chip_map 627 }, 628 { PCI_PRODUCT_INTEL_ATOMC2000_SATA_1, /* Intel Atom C2000 SATA */ 629 0, 630 piixsata_chip_map 631 }, 632 { PCI_PRODUCT_INTEL_ATOMC2000_SATA_2, /* Intel Atom C2000 SATA */ 633 0, 634 piixsata_chip_map 635 }, 636 { PCI_PRODUCT_INTEL_ATOMC2000_SATA_3, /* Intel Atom C2000 SATA */ 637 0, 638 piixsata_chip_map 639 }, 640 { PCI_PRODUCT_INTEL_ATOMC2000_SATA_4, /* Intel Atom C2000 SATA */ 641 0, 642 piixsata_chip_map 643 }, 644 { PCI_PRODUCT_INTEL_BAYTRAIL_SATA_1, /* Intel Baytrail SATA */ 645 0, 646 piixsata_chip_map 647 }, 648 { PCI_PRODUCT_INTEL_BAYTRAIL_SATA_2, /* Intel Baytrail SATA */ 649 0, 650 piixsata_chip_map 651 }, 652 { PCI_PRODUCT_INTEL_EP80579_SATA, /* Intel EP80579 SATA */ 653 0, 654 piixsata_chip_map 655 }, 656 { PCI_PRODUCT_INTEL_DH8900_SATA_1, /* Intel DH8900 SATA */ 657 0, 658 piixsata_chip_map 659 }, 660 { PCI_PRODUCT_INTEL_DH8900_SATA_2, /* Intel DH8900 SATA */ 661 0, 662 piixsata_chip_map 663 }, 664 { PCI_PRODUCT_INTEL_SCH_IDE, /* Intel SCH IDE */ 665 0, 666 sch_chip_map 667 } 668 }; 669 670 const struct pciide_product_desc pciide_amd_products[] = { 671 { PCI_PRODUCT_AMD_PBC756_IDE, /* AMD 756 */ 672 0, 673 amd756_chip_map 674 }, 675 { PCI_PRODUCT_AMD_766_IDE, /* AMD 766 */ 676 0, 677 amd756_chip_map 678 }, 679 { PCI_PRODUCT_AMD_PBC768_IDE, 680 0, 681 amd756_chip_map 682 }, 683 { PCI_PRODUCT_AMD_8111_IDE, 684 0, 685 amd756_chip_map 686 }, 687 { PCI_PRODUCT_AMD_CS5536_IDE, 688 0, 689 amd756_chip_map 690 }, 691 { PCI_PRODUCT_AMD_HUDSON2_IDE, 692 0, 693 ixp_chip_map 694 } 695 }; 696 697 #ifdef notyet 698 const struct pciide_product_desc pciide_opti_products[] = { 699 700 { PCI_PRODUCT_OPTI_82C621, 701 0, 702 opti_chip_map 703 }, 704 { PCI_PRODUCT_OPTI_82C568, 705 0, 706 opti_chip_map 707 }, 708 { PCI_PRODUCT_OPTI_82D568, 709 0, 710 opti_chip_map 711 } 712 }; 713 #endif 714 715 const struct pciide_product_desc pciide_cmd_products[] = { 716 { PCI_PRODUCT_CMDTECH_640, /* CMD Technology PCI0640 */ 717 0, 718 cmd_chip_map 719 }, 720 { PCI_PRODUCT_CMDTECH_643, /* CMD Technology PCI0643 */ 721 0, 722 cmd0643_9_chip_map 723 }, 724 { PCI_PRODUCT_CMDTECH_646, /* CMD Technology PCI0646 */ 725 0, 726 cmd0643_9_chip_map 727 }, 728 { PCI_PRODUCT_CMDTECH_648, /* CMD Technology PCI0648 */ 729 0, 730 cmd0643_9_chip_map 731 }, 732 { PCI_PRODUCT_CMDTECH_649, /* CMD Technology PCI0649 */ 733 0, 734 cmd0643_9_chip_map 735 }, 736 { PCI_PRODUCT_CMDTECH_680, /* CMD Technology PCI0680 */ 737 IDE_PCI_CLASS_OVERRIDE, 738 cmd680_chip_map 739 }, 740 { PCI_PRODUCT_CMDTECH_3112, /* SiI3112 SATA */ 741 0, 742 sii3112_chip_map 743 }, 744 { PCI_PRODUCT_CMDTECH_3512, /* SiI3512 SATA */ 745 0, 746 sii3112_chip_map 747 }, 748 { PCI_PRODUCT_CMDTECH_AAR_1210SA, /* Adaptec AAR-1210SA */ 749 0, 750 sii3112_chip_map 751 }, 752 { PCI_PRODUCT_CMDTECH_3114, /* SiI3114 SATA */ 753 0, 754 sii3114_chip_map 755 } 756 }; 757 758 const struct pciide_product_desc pciide_via_products[] = { 759 { PCI_PRODUCT_VIATECH_VT82C416, /* VIA VT82C416 IDE */ 760 0, 761 apollo_chip_map 762 }, 763 { PCI_PRODUCT_VIATECH_VT82C571, /* VIA VT82C571 IDE */ 764 0, 765 apollo_chip_map 766 }, 767 { PCI_PRODUCT_VIATECH_VT6410, /* VIA VT6410 IDE */ 768 IDE_PCI_CLASS_OVERRIDE, 769 
apollo_chip_map 770 }, 771 { PCI_PRODUCT_VIATECH_VT6415, /* VIA VT6415 IDE */ 772 IDE_PCI_CLASS_OVERRIDE, 773 apollo_chip_map 774 }, 775 { PCI_PRODUCT_VIATECH_CX700_IDE, /* VIA CX700 IDE */ 776 0, 777 apollo_chip_map 778 }, 779 { PCI_PRODUCT_VIATECH_VX700_IDE, /* VIA VX700 IDE */ 780 0, 781 apollo_chip_map 782 }, 783 { PCI_PRODUCT_VIATECH_VX855_IDE, /* VIA VX855 IDE */ 784 0, 785 apollo_chip_map 786 }, 787 { PCI_PRODUCT_VIATECH_VX900_IDE, /* VIA VX900 IDE */ 788 0, 789 apollo_chip_map 790 }, 791 { PCI_PRODUCT_VIATECH_VT6420_SATA, /* VIA VT6420 SATA */ 792 0, 793 sata_chip_map 794 }, 795 { PCI_PRODUCT_VIATECH_VT6421_SATA, /* VIA VT6421 SATA */ 796 0, 797 sata_chip_map 798 }, 799 { PCI_PRODUCT_VIATECH_VT8237A_SATA, /* VIA VT8237A SATA */ 800 0, 801 sata_chip_map 802 }, 803 { PCI_PRODUCT_VIATECH_VT8237A_SATA_2, /* VIA VT8237A SATA */ 804 0, 805 sata_chip_map 806 }, 807 { PCI_PRODUCT_VIATECH_VT8237S_SATA, /* VIA VT8237S SATA */ 808 0, 809 sata_chip_map 810 }, 811 { PCI_PRODUCT_VIATECH_VT8251_SATA, /* VIA VT8251 SATA */ 812 0, 813 sata_chip_map 814 } 815 }; 816 817 const struct pciide_product_desc pciide_cypress_products[] = { 818 { PCI_PRODUCT_CONTAQ_82C693, /* Contaq CY82C693 IDE */ 819 IDE_16BIT_IOSPACE, 820 cy693_chip_map 821 } 822 }; 823 824 const struct pciide_product_desc pciide_sis_products[] = { 825 { PCI_PRODUCT_SIS_5513, /* SIS 5513 EIDE */ 826 0, 827 sis_chip_map 828 }, 829 { PCI_PRODUCT_SIS_180, /* SIS 180 SATA */ 830 0, 831 sata_chip_map 832 }, 833 { PCI_PRODUCT_SIS_181, /* SIS 181 SATA */ 834 0, 835 sata_chip_map 836 }, 837 { PCI_PRODUCT_SIS_182, /* SIS 182 SATA */ 838 0, 839 sata_chip_map 840 }, 841 { PCI_PRODUCT_SIS_1183, /* SIS 1183 SATA */ 842 0, 843 sata_chip_map 844 } 845 }; 846 847 /* 848 * The National/AMD CS5535 requires MSRs to set DMA/PIO modes so it 849 * has been banished to the MD i386 pciide_machdep 850 */ 851 const struct pciide_product_desc pciide_natsemi_products[] = { 852 #ifdef __i386__ 853 { PCI_PRODUCT_NS_CS5535_IDE, /* National/AMD CS5535 IDE */ 854 0, 855 gcsc_chip_map 856 }, 857 #endif 858 { PCI_PRODUCT_NS_PC87415, /* National Semi PC87415 IDE */ 859 0, 860 natsemi_chip_map 861 }, 862 { PCI_PRODUCT_NS_SCx200_IDE, /* National Semi SCx200 IDE */ 863 0, 864 ns_scx200_chip_map 865 } 866 }; 867 868 const struct pciide_product_desc pciide_acer_products[] = { 869 { PCI_PRODUCT_ALI_M5229, /* Acer Labs M5229 UDMA IDE */ 870 0, 871 acer_chip_map 872 } 873 }; 874 875 const struct pciide_product_desc pciide_triones_products[] = { 876 { PCI_PRODUCT_TRIONES_HPT366, /* Highpoint HPT36x/37x IDE */ 877 IDE_PCI_CLASS_OVERRIDE, 878 hpt_chip_map, 879 }, 880 { PCI_PRODUCT_TRIONES_HPT372A, /* Highpoint HPT372A IDE */ 881 IDE_PCI_CLASS_OVERRIDE, 882 hpt_chip_map 883 }, 884 { PCI_PRODUCT_TRIONES_HPT302, /* Highpoint HPT302 IDE */ 885 IDE_PCI_CLASS_OVERRIDE, 886 hpt_chip_map 887 }, 888 { PCI_PRODUCT_TRIONES_HPT371, /* Highpoint HPT371 IDE */ 889 IDE_PCI_CLASS_OVERRIDE, 890 hpt_chip_map 891 }, 892 { PCI_PRODUCT_TRIONES_HPT374, /* Highpoint HPT374 IDE */ 893 IDE_PCI_CLASS_OVERRIDE, 894 hpt_chip_map 895 } 896 }; 897 898 const struct pciide_product_desc pciide_promise_products[] = { 899 { PCI_PRODUCT_PROMISE_PDC20246, 900 IDE_PCI_CLASS_OVERRIDE, 901 pdc202xx_chip_map, 902 }, 903 { PCI_PRODUCT_PROMISE_PDC20262, 904 IDE_PCI_CLASS_OVERRIDE, 905 pdc202xx_chip_map, 906 }, 907 { PCI_PRODUCT_PROMISE_PDC20265, 908 IDE_PCI_CLASS_OVERRIDE, 909 pdc202xx_chip_map, 910 }, 911 { PCI_PRODUCT_PROMISE_PDC20267, 912 IDE_PCI_CLASS_OVERRIDE, 913 pdc202xx_chip_map, 914 }, 915 { 
PCI_PRODUCT_PROMISE_PDC20268, 916 IDE_PCI_CLASS_OVERRIDE, 917 pdc202xx_chip_map, 918 }, 919 { PCI_PRODUCT_PROMISE_PDC20268R, 920 IDE_PCI_CLASS_OVERRIDE, 921 pdc202xx_chip_map, 922 }, 923 { PCI_PRODUCT_PROMISE_PDC20269, 924 IDE_PCI_CLASS_OVERRIDE, 925 pdc202xx_chip_map, 926 }, 927 { PCI_PRODUCT_PROMISE_PDC20271, 928 IDE_PCI_CLASS_OVERRIDE, 929 pdc202xx_chip_map, 930 }, 931 { PCI_PRODUCT_PROMISE_PDC20275, 932 IDE_PCI_CLASS_OVERRIDE, 933 pdc202xx_chip_map, 934 }, 935 { PCI_PRODUCT_PROMISE_PDC20276, 936 IDE_PCI_CLASS_OVERRIDE, 937 pdc202xx_chip_map, 938 }, 939 { PCI_PRODUCT_PROMISE_PDC20277, 940 IDE_PCI_CLASS_OVERRIDE, 941 pdc202xx_chip_map, 942 }, 943 { PCI_PRODUCT_PROMISE_PDC20318, 944 IDE_PCI_CLASS_OVERRIDE, 945 pdcsata_chip_map, 946 }, 947 { PCI_PRODUCT_PROMISE_PDC20319, 948 IDE_PCI_CLASS_OVERRIDE, 949 pdcsata_chip_map, 950 }, 951 { PCI_PRODUCT_PROMISE_PDC20371, 952 IDE_PCI_CLASS_OVERRIDE, 953 pdcsata_chip_map, 954 }, 955 { PCI_PRODUCT_PROMISE_PDC20375, 956 IDE_PCI_CLASS_OVERRIDE, 957 pdcsata_chip_map, 958 }, 959 { PCI_PRODUCT_PROMISE_PDC20376, 960 IDE_PCI_CLASS_OVERRIDE, 961 pdcsata_chip_map, 962 }, 963 { PCI_PRODUCT_PROMISE_PDC20377, 964 IDE_PCI_CLASS_OVERRIDE, 965 pdcsata_chip_map, 966 }, 967 { PCI_PRODUCT_PROMISE_PDC20378, 968 IDE_PCI_CLASS_OVERRIDE, 969 pdcsata_chip_map, 970 }, 971 { PCI_PRODUCT_PROMISE_PDC20379, 972 IDE_PCI_CLASS_OVERRIDE, 973 pdcsata_chip_map, 974 }, 975 { PCI_PRODUCT_PROMISE_PDC40518, 976 IDE_PCI_CLASS_OVERRIDE, 977 pdcsata_chip_map, 978 }, 979 { PCI_PRODUCT_PROMISE_PDC40519, 980 IDE_PCI_CLASS_OVERRIDE, 981 pdcsata_chip_map, 982 }, 983 { PCI_PRODUCT_PROMISE_PDC40718, 984 IDE_PCI_CLASS_OVERRIDE, 985 pdcsata_chip_map, 986 }, 987 { PCI_PRODUCT_PROMISE_PDC40719, 988 IDE_PCI_CLASS_OVERRIDE, 989 pdcsata_chip_map, 990 }, 991 { PCI_PRODUCT_PROMISE_PDC40779, 992 IDE_PCI_CLASS_OVERRIDE, 993 pdcsata_chip_map, 994 }, 995 { PCI_PRODUCT_PROMISE_PDC20571, 996 IDE_PCI_CLASS_OVERRIDE, 997 pdcsata_chip_map, 998 }, 999 { PCI_PRODUCT_PROMISE_PDC20575, 1000 IDE_PCI_CLASS_OVERRIDE, 1001 pdcsata_chip_map, 1002 }, 1003 { PCI_PRODUCT_PROMISE_PDC20579, 1004 IDE_PCI_CLASS_OVERRIDE, 1005 pdcsata_chip_map, 1006 }, 1007 { PCI_PRODUCT_PROMISE_PDC20771, 1008 IDE_PCI_CLASS_OVERRIDE, 1009 pdcsata_chip_map, 1010 }, 1011 { PCI_PRODUCT_PROMISE_PDC20775, 1012 IDE_PCI_CLASS_OVERRIDE, 1013 pdcsata_chip_map, 1014 } 1015 }; 1016 1017 const struct pciide_product_desc pciide_acard_products[] = { 1018 { PCI_PRODUCT_ACARD_ATP850U, /* Acard ATP850U Ultra33 Controller */ 1019 IDE_PCI_CLASS_OVERRIDE, 1020 acard_chip_map, 1021 }, 1022 { PCI_PRODUCT_ACARD_ATP860, /* Acard ATP860 Ultra66 Controller */ 1023 IDE_PCI_CLASS_OVERRIDE, 1024 acard_chip_map, 1025 }, 1026 { PCI_PRODUCT_ACARD_ATP860A, /* Acard ATP860-A Ultra66 Controller */ 1027 IDE_PCI_CLASS_OVERRIDE, 1028 acard_chip_map, 1029 }, 1030 { PCI_PRODUCT_ACARD_ATP865A, /* Acard ATP865-A Ultra133 Controller */ 1031 IDE_PCI_CLASS_OVERRIDE, 1032 acard_chip_map, 1033 }, 1034 { PCI_PRODUCT_ACARD_ATP865R, /* Acard ATP865-R Ultra133 Controller */ 1035 IDE_PCI_CLASS_OVERRIDE, 1036 acard_chip_map, 1037 } 1038 }; 1039 1040 const struct pciide_product_desc pciide_serverworks_products[] = { 1041 { PCI_PRODUCT_RCC_OSB4_IDE, 1042 0, 1043 serverworks_chip_map, 1044 }, 1045 { PCI_PRODUCT_RCC_CSB5_IDE, 1046 0, 1047 serverworks_chip_map, 1048 }, 1049 { PCI_PRODUCT_RCC_CSB6_IDE, 1050 0, 1051 serverworks_chip_map, 1052 }, 1053 { PCI_PRODUCT_RCC_CSB6_RAID_IDE, 1054 0, 1055 serverworks_chip_map, 1056 }, 1057 { PCI_PRODUCT_RCC_HT_1000_IDE, 1058 0, 1059 serverworks_chip_map, 1060 }, 1061 
{ PCI_PRODUCT_RCC_K2_SATA, 1062 0, 1063 svwsata_chip_map, 1064 }, 1065 { PCI_PRODUCT_RCC_FRODO4_SATA, 1066 0, 1067 svwsata_chip_map, 1068 }, 1069 { PCI_PRODUCT_RCC_FRODO8_SATA, 1070 0, 1071 svwsata_chip_map, 1072 }, 1073 { PCI_PRODUCT_RCC_HT_1000_SATA_1, 1074 0, 1075 svwsata_chip_map, 1076 }, 1077 { PCI_PRODUCT_RCC_HT_1000_SATA_2, 1078 0, 1079 svwsata_chip_map, 1080 } 1081 }; 1082 1083 const struct pciide_product_desc pciide_nvidia_products[] = { 1084 { PCI_PRODUCT_NVIDIA_NFORCE_IDE, 1085 0, 1086 nforce_chip_map 1087 }, 1088 { PCI_PRODUCT_NVIDIA_NFORCE2_IDE, 1089 0, 1090 nforce_chip_map 1091 }, 1092 { PCI_PRODUCT_NVIDIA_NFORCE2_400_IDE, 1093 0, 1094 nforce_chip_map 1095 }, 1096 { PCI_PRODUCT_NVIDIA_NFORCE3_IDE, 1097 0, 1098 nforce_chip_map 1099 }, 1100 { PCI_PRODUCT_NVIDIA_NFORCE3_250_IDE, 1101 0, 1102 nforce_chip_map 1103 }, 1104 { PCI_PRODUCT_NVIDIA_NFORCE4_ATA133, 1105 0, 1106 nforce_chip_map 1107 }, 1108 { PCI_PRODUCT_NVIDIA_MCP04_IDE, 1109 0, 1110 nforce_chip_map 1111 }, 1112 { PCI_PRODUCT_NVIDIA_MCP51_IDE, 1113 0, 1114 nforce_chip_map 1115 }, 1116 { PCI_PRODUCT_NVIDIA_MCP55_IDE, 1117 0, 1118 nforce_chip_map 1119 }, 1120 { PCI_PRODUCT_NVIDIA_MCP61_IDE, 1121 0, 1122 nforce_chip_map 1123 }, 1124 { PCI_PRODUCT_NVIDIA_MCP65_IDE, 1125 0, 1126 nforce_chip_map 1127 }, 1128 { PCI_PRODUCT_NVIDIA_MCP67_IDE, 1129 0, 1130 nforce_chip_map 1131 }, 1132 { PCI_PRODUCT_NVIDIA_MCP73_IDE, 1133 0, 1134 nforce_chip_map 1135 }, 1136 { PCI_PRODUCT_NVIDIA_MCP77_IDE, 1137 0, 1138 nforce_chip_map 1139 }, 1140 { PCI_PRODUCT_NVIDIA_NFORCE2_400_SATA, 1141 0, 1142 sata_chip_map 1143 }, 1144 { PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA, 1145 0, 1146 sata_chip_map 1147 }, 1148 { PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA2, 1149 0, 1150 sata_chip_map 1151 }, 1152 { PCI_PRODUCT_NVIDIA_NFORCE4_SATA1, 1153 0, 1154 sata_chip_map 1155 }, 1156 { PCI_PRODUCT_NVIDIA_NFORCE4_SATA2, 1157 0, 1158 sata_chip_map 1159 }, 1160 { PCI_PRODUCT_NVIDIA_MCP04_SATA, 1161 0, 1162 sata_chip_map 1163 }, 1164 { PCI_PRODUCT_NVIDIA_MCP04_SATA2, 1165 0, 1166 sata_chip_map 1167 }, 1168 { PCI_PRODUCT_NVIDIA_MCP51_SATA, 1169 0, 1170 sata_chip_map 1171 }, 1172 { PCI_PRODUCT_NVIDIA_MCP51_SATA2, 1173 0, 1174 sata_chip_map 1175 }, 1176 { PCI_PRODUCT_NVIDIA_MCP55_SATA, 1177 0, 1178 sata_chip_map 1179 }, 1180 { PCI_PRODUCT_NVIDIA_MCP55_SATA2, 1181 0, 1182 sata_chip_map 1183 }, 1184 { PCI_PRODUCT_NVIDIA_MCP61_SATA, 1185 0, 1186 sata_chip_map 1187 }, 1188 { PCI_PRODUCT_NVIDIA_MCP61_SATA2, 1189 0, 1190 sata_chip_map 1191 }, 1192 { PCI_PRODUCT_NVIDIA_MCP61_SATA3, 1193 0, 1194 sata_chip_map 1195 }, 1196 { PCI_PRODUCT_NVIDIA_MCP65_SATA_1, 1197 0, 1198 sata_chip_map 1199 }, 1200 { PCI_PRODUCT_NVIDIA_MCP65_SATA_2, 1201 0, 1202 sata_chip_map 1203 }, 1204 { PCI_PRODUCT_NVIDIA_MCP65_SATA_3, 1205 0, 1206 sata_chip_map 1207 }, 1208 { PCI_PRODUCT_NVIDIA_MCP65_SATA_4, 1209 0, 1210 sata_chip_map 1211 }, 1212 { PCI_PRODUCT_NVIDIA_MCP67_SATA_1, 1213 0, 1214 sata_chip_map 1215 }, 1216 { PCI_PRODUCT_NVIDIA_MCP67_SATA_2, 1217 0, 1218 sata_chip_map 1219 }, 1220 { PCI_PRODUCT_NVIDIA_MCP67_SATA_3, 1221 0, 1222 sata_chip_map 1223 }, 1224 { PCI_PRODUCT_NVIDIA_MCP67_SATA_4, 1225 0, 1226 sata_chip_map 1227 }, 1228 { PCI_PRODUCT_NVIDIA_MCP77_SATA_1, 1229 0, 1230 sata_chip_map 1231 }, 1232 { PCI_PRODUCT_NVIDIA_MCP79_SATA_1, 1233 0, 1234 sata_chip_map 1235 }, 1236 { PCI_PRODUCT_NVIDIA_MCP79_SATA_2, 1237 0, 1238 sata_chip_map 1239 }, 1240 { PCI_PRODUCT_NVIDIA_MCP79_SATA_3, 1241 0, 1242 sata_chip_map 1243 }, 1244 { PCI_PRODUCT_NVIDIA_MCP79_SATA_4, 1245 0, 1246 sata_chip_map 1247 }, 1248 { 
PCI_PRODUCT_NVIDIA_MCP89_SATA_1, 1249 0, 1250 sata_chip_map 1251 }, 1252 { PCI_PRODUCT_NVIDIA_MCP89_SATA_2, 1253 0, 1254 sata_chip_map 1255 }, 1256 { PCI_PRODUCT_NVIDIA_MCP89_SATA_3, 1257 0, 1258 sata_chip_map 1259 }, 1260 { PCI_PRODUCT_NVIDIA_MCP89_SATA_4, 1261 0, 1262 sata_chip_map 1263 } 1264 }; 1265 1266 const struct pciide_product_desc pciide_ite_products[] = { 1267 { PCI_PRODUCT_ITEXPRESS_IT8211F, 1268 IDE_PCI_CLASS_OVERRIDE, 1269 ite_chip_map 1270 }, 1271 { PCI_PRODUCT_ITEXPRESS_IT8212F, 1272 IDE_PCI_CLASS_OVERRIDE, 1273 ite_chip_map 1274 } 1275 }; 1276 1277 const struct pciide_product_desc pciide_ati_products[] = { 1278 { PCI_PRODUCT_ATI_SB200_IDE, 1279 0, 1280 ixp_chip_map 1281 }, 1282 { PCI_PRODUCT_ATI_SB300_IDE, 1283 0, 1284 ixp_chip_map 1285 }, 1286 { PCI_PRODUCT_ATI_SB400_IDE, 1287 0, 1288 ixp_chip_map 1289 }, 1290 { PCI_PRODUCT_ATI_SB600_IDE, 1291 0, 1292 ixp_chip_map 1293 }, 1294 { PCI_PRODUCT_ATI_SB700_IDE, 1295 0, 1296 ixp_chip_map 1297 }, 1298 { PCI_PRODUCT_ATI_SB300_SATA, 1299 0, 1300 sii3112_chip_map 1301 }, 1302 { PCI_PRODUCT_ATI_SB400_SATA_1, 1303 0, 1304 sii3112_chip_map 1305 }, 1306 { PCI_PRODUCT_ATI_SB400_SATA_2, 1307 0, 1308 sii3112_chip_map 1309 } 1310 }; 1311 1312 const struct pciide_product_desc pciide_jmicron_products[] = { 1313 { PCI_PRODUCT_JMICRON_JMB361, 1314 0, 1315 jmicron_chip_map 1316 }, 1317 { PCI_PRODUCT_JMICRON_JMB363, 1318 0, 1319 jmicron_chip_map 1320 }, 1321 { PCI_PRODUCT_JMICRON_JMB365, 1322 0, 1323 jmicron_chip_map 1324 }, 1325 { PCI_PRODUCT_JMICRON_JMB366, 1326 0, 1327 jmicron_chip_map 1328 }, 1329 { PCI_PRODUCT_JMICRON_JMB368, 1330 0, 1331 jmicron_chip_map 1332 } 1333 }; 1334 1335 const struct pciide_product_desc pciide_phison_products[] = { 1336 { PCI_PRODUCT_PHISON_PS5000, 1337 0, 1338 phison_chip_map 1339 }, 1340 }; 1341 1342 const struct pciide_product_desc pciide_rdc_products[] = { 1343 { PCI_PRODUCT_RDC_R1012_IDE, 1344 0, 1345 rdc_chip_map 1346 }, 1347 }; 1348 1349 struct pciide_vendor_desc { 1350 u_int32_t ide_vendor; 1351 const struct pciide_product_desc *ide_products; 1352 int ide_nproducts; 1353 }; 1354 1355 const struct pciide_vendor_desc pciide_vendors[] = { 1356 { PCI_VENDOR_INTEL, pciide_intel_products, 1357 nitems(pciide_intel_products) }, 1358 { PCI_VENDOR_AMD, pciide_amd_products, 1359 nitems(pciide_amd_products) }, 1360 #ifdef notyet 1361 { PCI_VENDOR_OPTI, pciide_opti_products, 1362 nitems(pciide_opti_products) }, 1363 #endif 1364 { PCI_VENDOR_CMDTECH, pciide_cmd_products, 1365 nitems(pciide_cmd_products) }, 1366 { PCI_VENDOR_VIATECH, pciide_via_products, 1367 nitems(pciide_via_products) }, 1368 { PCI_VENDOR_CONTAQ, pciide_cypress_products, 1369 nitems(pciide_cypress_products) }, 1370 { PCI_VENDOR_SIS, pciide_sis_products, 1371 nitems(pciide_sis_products) }, 1372 { PCI_VENDOR_NS, pciide_natsemi_products, 1373 nitems(pciide_natsemi_products) }, 1374 { PCI_VENDOR_ALI, pciide_acer_products, 1375 nitems(pciide_acer_products) }, 1376 { PCI_VENDOR_TRIONES, pciide_triones_products, 1377 nitems(pciide_triones_products) }, 1378 { PCI_VENDOR_ACARD, pciide_acard_products, 1379 nitems(pciide_acard_products) }, 1380 { PCI_VENDOR_RCC, pciide_serverworks_products, 1381 nitems(pciide_serverworks_products) }, 1382 { PCI_VENDOR_PROMISE, pciide_promise_products, 1383 nitems(pciide_promise_products) }, 1384 { PCI_VENDOR_NVIDIA, pciide_nvidia_products, 1385 nitems(pciide_nvidia_products) }, 1386 { PCI_VENDOR_ITEXPRESS, pciide_ite_products, 1387 nitems(pciide_ite_products) }, 1388 { PCI_VENDOR_ATI, pciide_ati_products, 1389 
	    nitems(pciide_ati_products) },
	{ PCI_VENDOR_JMICRON, pciide_jmicron_products,
	    nitems(pciide_jmicron_products) },
	{ PCI_VENDOR_PHISON, pciide_phison_products,
	    nitems(pciide_phison_products) },
	{ PCI_VENDOR_RDC, pciide_rdc_products,
	    nitems(pciide_rdc_products) }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01

int	pciide_match(struct device *, void *, void *);
void	pciide_attach(struct device *, struct device *, void *);
int	pciide_detach(struct device *, int);
int	pciide_activate(struct device *, int);

struct cfattach pciide_pci_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach,
	pciide_detach, pciide_activate
};

struct cfattach pciide_jmb_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach,
	pciide_detach, pciide_activate
};

struct cfdriver pciide_cd = {
	NULL, "pciide", DV_DULL
};

const struct pciide_product_desc *pciide_lookup_product(u_int32_t);

const struct pciide_product_desc *
pciide_lookup_product(u_int32_t id)
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;
	int i;

	for (i = 0, vp = pciide_vendors; i < nitems(pciide_vendors); vp++, i++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if (i == nitems(pciide_vendors))
		return (NULL);

	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (i == vp->ide_nproducts)
		return (NULL);
	return (pp);
}

int
pciide_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Some IDE controllers have severe bugs when used in PCI mode.
	 * We punt and attach them to the ISA bus instead.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
		return (0);

	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers.  Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE))
		return (1);

	/*
	 * Check the class code to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE) {
		switch (PCI_SUBCLASS(pa->pa_class)) {
		case PCI_SUBCLASS_MASS_STORAGE_IDE:
			return (1);

		/*
		 * We only match these if we know they have
		 * a match, as we may not support native interfaces
		 * on them.
1481 */ 1482 case PCI_SUBCLASS_MASS_STORAGE_SATA: 1483 case PCI_SUBCLASS_MASS_STORAGE_RAID: 1484 case PCI_SUBCLASS_MASS_STORAGE_MISC: 1485 if (pp) 1486 return (1); 1487 else 1488 return (0); 1489 break; 1490 } 1491 } 1492 1493 return (0); 1494 } 1495 1496 void 1497 pciide_attach(struct device *parent, struct device *self, void *aux) 1498 { 1499 struct pciide_softc *sc = (struct pciide_softc *)self; 1500 struct pci_attach_args *pa = aux; 1501 1502 sc->sc_pp = pciide_lookup_product(pa->pa_id); 1503 if (sc->sc_pp == NULL) 1504 sc->sc_pp = &default_product_desc; 1505 sc->sc_rev = PCI_REVISION(pa->pa_class); 1506 1507 sc->sc_pc = pa->pa_pc; 1508 sc->sc_tag = pa->pa_tag; 1509 1510 /* Set up DMA defaults; these might be adjusted by chip_map. */ 1511 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX; 1512 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN; 1513 1514 sc->sc_dmacmd_read = pciide_dmacmd_read; 1515 sc->sc_dmacmd_write = pciide_dmacmd_write; 1516 sc->sc_dmactl_read = pciide_dmactl_read; 1517 sc->sc_dmactl_write = pciide_dmactl_write; 1518 sc->sc_dmatbl_write = pciide_dmatbl_write; 1519 1520 WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc, 1521 sc->sc_tag, pa->pa_class), DEBUG_PROBE); 1522 1523 sc->sc_pp->chip_map(sc, pa); 1524 1525 WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n", 1526 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG)), 1527 DEBUG_PROBE); 1528 } 1529 1530 int 1531 pciide_detach(struct device *self, int flags) 1532 { 1533 struct pciide_softc *sc = (struct pciide_softc *)self; 1534 if (sc->chip_unmap == NULL) 1535 panic("unmap not yet implemented for this chipset"); 1536 else 1537 sc->chip_unmap(sc, flags); 1538 1539 return 0; 1540 } 1541 1542 int 1543 pciide_activate(struct device *self, int act) 1544 { 1545 int rv = 0; 1546 struct pciide_softc *sc = (struct pciide_softc *)self; 1547 int i; 1548 1549 switch (act) { 1550 case DVACT_SUSPEND: 1551 rv = config_activate_children(self, act); 1552 1553 for (i = 0; i < nitems(sc->sc_save); i++) 1554 sc->sc_save[i] = pci_conf_read(sc->sc_pc, 1555 sc->sc_tag, PCI_MAPREG_END + 0x18 + (i * 4)); 1556 1557 if (sc->sc_pp->chip_map == sch_chip_map) { 1558 sc->sc_save2[0] = pci_conf_read(sc->sc_pc, 1559 sc->sc_tag, SCH_D0TIM); 1560 sc->sc_save2[1] = pci_conf_read(sc->sc_pc, 1561 sc->sc_tag, SCH_D1TIM); 1562 } else if (sc->sc_pp->chip_map == piixsata_chip_map) { 1563 sc->sc_save2[0] = pciide_pci_read(sc->sc_pc, 1564 sc->sc_tag, ICH5_SATA_MAP); 1565 sc->sc_save2[1] = pciide_pci_read(sc->sc_pc, 1566 sc->sc_tag, ICH5_SATA_PI); 1567 sc->sc_save2[2] = pciide_pci_read(sc->sc_pc, 1568 sc->sc_tag, ICH_SATA_PCS); 1569 } else if (sc->sc_pp->chip_map == sii3112_chip_map) { 1570 sc->sc_save[0] = pci_conf_read(sc->sc_pc, 1571 sc->sc_tag, SII3112_SCS_CMD); 1572 sc->sc_save[1] = pci_conf_read(sc->sc_pc, 1573 sc->sc_tag, SII3112_PCI_CFGCTL); 1574 } else if (sc->sc_pp->chip_map == ite_chip_map) { 1575 sc->sc_save2[0] = pci_conf_read(sc->sc_pc, 1576 sc->sc_tag, IT_TIM(0)); 1577 } else if (sc->sc_pp->chip_map == nforce_chip_map) { 1578 sc->sc_save2[0] = pci_conf_read(sc->sc_pc, 1579 sc->sc_tag, NFORCE_PIODMATIM); 1580 sc->sc_save2[1] = pci_conf_read(sc->sc_pc, 1581 sc->sc_tag, NFORCE_PIOTIM); 1582 sc->sc_save2[2] = pci_conf_read(sc->sc_pc, 1583 sc->sc_tag, NFORCE_UDMATIM); 1584 } 1585 break; 1586 case DVACT_RESUME: 1587 for (i = 0; i < nitems(sc->sc_save); i++) 1588 pci_conf_write(sc->sc_pc, sc->sc_tag, 1589 PCI_MAPREG_END + 0x18 + (i * 4), 1590 sc->sc_save[i]); 1591 1592 if (sc->sc_pp->chip_map == default_chip_map || 1593 
sc->sc_pp->chip_map == sata_chip_map || 1594 sc->sc_pp->chip_map == piix_chip_map || 1595 sc->sc_pp->chip_map == amd756_chip_map || 1596 sc->sc_pp->chip_map == phison_chip_map || 1597 sc->sc_pp->chip_map == rdc_chip_map || 1598 sc->sc_pp->chip_map == ixp_chip_map || 1599 sc->sc_pp->chip_map == acard_chip_map || 1600 sc->sc_pp->chip_map == apollo_chip_map || 1601 sc->sc_pp->chip_map == sis_chip_map) { 1602 /* nothing to restore -- uses only 0x40 - 0x56 */ 1603 } else if (sc->sc_pp->chip_map == sch_chip_map) { 1604 pci_conf_write(sc->sc_pc, sc->sc_tag, 1605 SCH_D0TIM, sc->sc_save2[0]); 1606 pci_conf_write(sc->sc_pc, sc->sc_tag, 1607 SCH_D1TIM, sc->sc_save2[1]); 1608 } else if (sc->sc_pp->chip_map == piixsata_chip_map) { 1609 pciide_pci_write(sc->sc_pc, sc->sc_tag, 1610 ICH5_SATA_MAP, sc->sc_save2[0]); 1611 pciide_pci_write(sc->sc_pc, sc->sc_tag, 1612 ICH5_SATA_PI, sc->sc_save2[1]); 1613 pciide_pci_write(sc->sc_pc, sc->sc_tag, 1614 ICH_SATA_PCS, sc->sc_save2[2]); 1615 } else if (sc->sc_pp->chip_map == sii3112_chip_map) { 1616 pci_conf_write(sc->sc_pc, sc->sc_tag, 1617 SII3112_SCS_CMD, sc->sc_save[0]); 1618 delay(50 * 1000); 1619 pci_conf_write(sc->sc_pc, sc->sc_tag, 1620 SII3112_PCI_CFGCTL, sc->sc_save[1]); 1621 delay(50 * 1000); 1622 } else if (sc->sc_pp->chip_map == ite_chip_map) { 1623 pci_conf_write(sc->sc_pc, sc->sc_tag, 1624 IT_TIM(0), sc->sc_save2[0]); 1625 } else if (sc->sc_pp->chip_map == nforce_chip_map) { 1626 pci_conf_write(sc->sc_pc, sc->sc_tag, 1627 NFORCE_PIODMATIM, sc->sc_save2[0]); 1628 pci_conf_write(sc->sc_pc, sc->sc_tag, 1629 NFORCE_PIOTIM, sc->sc_save2[1]); 1630 pci_conf_write(sc->sc_pc, sc->sc_tag, 1631 NFORCE_UDMATIM, sc->sc_save2[2]); 1632 } else { 1633 printf("%s: restore for unknown chip map %x\n", 1634 sc->sc_wdcdev.sc_dev.dv_xname, 1635 sc->sc_pp->ide_product); 1636 } 1637 1638 rv = config_activate_children(self, act); 1639 break; 1640 default: 1641 rv = config_activate_children(self, act); 1642 break; 1643 } 1644 return (rv); 1645 } 1646 1647 int 1648 pciide_mapregs_compat(struct pci_attach_args *pa, struct pciide_channel *cp, 1649 int compatchan, bus_size_t *cmdsizep, bus_size_t *ctlsizep) 1650 { 1651 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1652 struct channel_softc *wdc_cp = &cp->wdc_channel; 1653 pcireg_t csr; 1654 1655 cp->compat = 1; 1656 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 1657 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 1658 1659 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG); 1660 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1661 csr | PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MASTER_ENABLE); 1662 1663 wdc_cp->cmd_iot = pa->pa_iot; 1664 1665 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 1666 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) { 1667 printf("%s: couldn't map %s cmd regs\n", 1668 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1669 return (0); 1670 } 1671 1672 wdc_cp->ctl_iot = pa->pa_iot; 1673 1674 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 1675 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) { 1676 printf("%s: couldn't map %s ctl regs\n", 1677 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1678 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, 1679 PCIIDE_COMPAT_CMD_SIZE); 1680 return (0); 1681 } 1682 wdc_cp->cmd_iosz = *cmdsizep; 1683 wdc_cp->ctl_iosz = *ctlsizep; 1684 1685 return (1); 1686 } 1687 1688 int 1689 pciide_unmapregs_compat(struct pciide_softc *sc, struct pciide_channel *cp) 1690 { 1691 struct channel_softc *wdc_cp = 
	    &cp->wdc_channel;

	bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz);
	bus_space_unmap(wdc_cp->ctl_iot, wdc_cp->ctl_ioh, wdc_cp->ctl_iosz);

	if (sc->sc_pci_ih != NULL) {
		pciide_machdep_compat_intr_disestablish(sc->sc_pc, sc->sc_pci_ih);
		sc->sc_pci_ih = NULL;
	}

	return (0);
}

int
pciide_mapregs_native(struct pci_attach_args *pa, struct pciide_channel *cp,
    bus_size_t *cmdsizep, bus_size_t *ctlsizep, int (*pci_intr)(void *))
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;
	pcireg_t maptype;

	cp->compat = 0;

	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return (0);
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc,
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return (0);
		}
	}
	cp->ih = sc->sc_pci_ih;
	sc->sc_pc = pa->pa_pc;

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CMD_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) {
		printf("%s: couldn't map %s cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_CTL_BASE(wdc_cp->channel));
	WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (maptype == PCI_MAPREG_TYPE_IO ? "I/O": "memory")), DEBUG_PROBE);
	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    maptype, 0,
	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) {
		printf("%s: couldn't map %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return (0);
	}
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register; the control register itself is at offset 2.  Pass the
	 * generic code a handle for only one byte at the right offset.
	 */
	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdc_cp->ctl_ioh) != 0) {
		printf("%s: unable to subregion %s ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
		return (0);
	}
	wdc_cp->cmd_iosz = *cmdsizep;
	wdc_cp->ctl_iosz = *ctlsizep;

	return (1);
}

int
pciide_unmapregs_native(struct pciide_softc *sc, struct pciide_channel *cp)
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz);

	/* Unmap the whole control space, not just the sub-region */
	bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, wdc_cp->ctl_iosz);

	if (sc->sc_pci_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih);
		sc->sc_pci_ih = NULL;
	}

	return (0);
}

void
pciide_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done.  If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space", some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
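
	/*
	 * Given the above, probe the BAR type first and accept either an
	 * I/O mapping or a 32-bit memory mapping for the bus master
	 * registers; anything else leaves sc_dma_ok at zero.
	 */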

	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", unused (registers at unsafe address %#lx)", addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, &sc->sc_dma_iosz,
		    0) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", (unsupported maptype 0x%x)", maptype);
		break;
	}
}

void
pciide_unmapreg_dma(struct pciide_softc *sc)
{
	bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, sc->sc_dma_iosz);
}

int
pciide_intr_flag(struct pciide_channel *cp)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int chan = cp->wdc_channel.channel;

	if (cp->dma_in_progress) {
		int retry = 10;
		int status;

		/* Check the status register */
		for (retry = 10; retry > 0; retry--) {
			status = PCIIDE_DMACTL_READ(sc, chan);
			if (status & IDEDMA_CTL_INTR) {
				break;
			}
			DELAY(5);
		}

		/* Not for us. */
		if (retry == 0)
			return (0);

		return (1);
	}

	return (-1);
}

int
pciide_compat_intr(void *arg)
{
	struct pciide_channel *cp = arg;

	if (pciide_intr_flag(cp) == 0)
		return (0);

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}
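
/*
 * Interrupt handler for channels in native-PCI mode.  The line may be
 * shared between both channels (and with other devices), so each channel
 * is first checked with pciide_intr_flag(): 1 means our DMA engine flagged
 * an interrupt, 0 means it did not (not ours), and -1 means no DMA was in
 * progress, so wdcintr() has to decide on its own.
 */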
int
pciide_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If it's a compat channel, skip it. */
		if (cp->compat)
			continue;

		if (cp->hw_ok == 0)
			continue;

		if (pciide_intr_flag(cp) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

u_int8_t
pciide_dmacmd_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan)));
}

void
pciide_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan), val);
}

u_int8_t
pciide_dmactl_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan)));
}

void
pciide_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan), val);
}

void
pciide_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val)
{
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL(chan), val);
}

void
pciide_channel_dma_setup(struct pciide_channel *cp)
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* Set up DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

int
pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive)
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If the table was already allocated, just return */
	if (dma_maps->dma_table)
		return (0);

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
	    seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error =
bus_dmamap_create(sc->sc_dmat, dma_table_size, 2058 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 2059 &dma_maps->dmamap_table)) != 0) { 2060 printf("%s:%d: unable to create table DMA map for " 2061 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2062 channel, drive, error); 2063 return (error); 2064 } 2065 if ((error = bus_dmamap_load(sc->sc_dmat, 2066 dma_maps->dmamap_table, 2067 dma_maps->dma_table, 2068 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 2069 printf("%s:%d: unable to load table DMA map for " 2070 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2071 channel, drive, error); 2072 return (error); 2073 } 2074 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 2075 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE); 2076 /* Create a xfer DMA map for this drive */ 2077 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 2078 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary, 2079 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 2080 &dma_maps->dmamap_xfer)) != 0) { 2081 printf("%s:%d: unable to create xfer DMA map for " 2082 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2083 channel, drive, error); 2084 return (error); 2085 } 2086 return (0); 2087 } 2088 2089 int 2090 pciide_dma_init(void *v, int channel, int drive, void *databuf, 2091 size_t datalen, int flags) 2092 { 2093 struct pciide_softc *sc = v; 2094 int error, seg; 2095 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2096 struct pciide_dma_maps *dma_maps = 2097 &sc->pciide_channels[channel].dma_maps[drive]; 2098 #ifndef BUS_DMA_RAW 2099 #define BUS_DMA_RAW 0 2100 #endif 2101 2102 error = bus_dmamap_load(sc->sc_dmat, 2103 dma_maps->dmamap_xfer, 2104 databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW); 2105 if (error) { 2106 printf("%s:%d: unable to load xfer DMA map for " 2107 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2108 channel, drive, error); 2109 return (error); 2110 } 2111 2112 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 2113 dma_maps->dmamap_xfer->dm_mapsize, 2114 (flags & WDC_DMA_READ) ? 2115 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2116 2117 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 2118 #ifdef DIAGNOSTIC 2119 /* A segment must not cross a 64k boundary */ 2120 { 2121 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 2122 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 2123 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 2124 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 2125 printf("pciide_dma: segment %d physical addr 0x%lx" 2126 " len 0x%lx not properly aligned\n", 2127 seg, phys, len); 2128 panic("pciide_dma: buf align"); 2129 } 2130 } 2131 #endif 2132 dma_maps->dma_table[seg].base_addr = 2133 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 2134 dma_maps->dma_table[seg].byte_count = 2135 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 2136 IDEDMA_BYTE_COUNT_MASK); 2137 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 2138 seg, letoh32(dma_maps->dma_table[seg].byte_count), 2139 letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 2140 2141 } 2142 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 2143 htole32(IDEDMA_BYTE_COUNT_EOT); 2144 2145 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 2146 dma_maps->dmamap_table->dm_mapsize, 2147 BUS_DMASYNC_PREWRITE); 2148 2149 /* Maps are ready. 
Start DMA function */ 2150 #ifdef DIAGNOSTIC 2151 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 2152 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 2153 dma_maps->dmamap_table->dm_segs[0].ds_addr); 2154 panic("pciide_dma_init: table align"); 2155 } 2156 #endif 2157 2158 /* Clear status bits */ 2159 PCIIDE_DMACTL_WRITE(sc, channel, PCIIDE_DMACTL_READ(sc, channel)); 2160 /* Write table addr */ 2161 PCIIDE_DMATBL_WRITE(sc, channel, 2162 dma_maps->dmamap_table->dm_segs[0].ds_addr); 2163 /* set read/write */ 2164 PCIIDE_DMACMD_WRITE(sc, channel, 2165 ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd); 2166 /* remember flags */ 2167 dma_maps->dma_flags = flags; 2168 return (0); 2169 } 2170 2171 void 2172 pciide_dma_start(void *v, int channel, int drive) 2173 { 2174 struct pciide_softc *sc = v; 2175 2176 WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS); 2177 PCIIDE_DMACMD_WRITE(sc, channel, PCIIDE_DMACMD_READ(sc, channel) | 2178 IDEDMA_CMD_START); 2179 2180 sc->pciide_channels[channel].dma_in_progress = 1; 2181 } 2182 2183 int 2184 pciide_dma_finish(void *v, int channel, int drive, int force) 2185 { 2186 struct pciide_softc *sc = v; 2187 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2188 u_int8_t status; 2189 int error = 0; 2190 struct pciide_dma_maps *dma_maps = 2191 &sc->pciide_channels[channel].dma_maps[drive]; 2192 2193 status = PCIIDE_DMACTL_READ(sc, channel); 2194 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 2195 DEBUG_XFERS); 2196 if (status == 0xff) 2197 return (status); 2198 2199 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) { 2200 error = WDC_DMAST_NOIRQ; 2201 goto done; 2202 } 2203 2204 /* stop DMA channel */ 2205 PCIIDE_DMACMD_WRITE(sc, channel, 2206 ((dma_maps->dma_flags & WDC_DMA_READ) ? 2207 0x00 : IDEDMA_CMD_WRITE) | cp->idedma_cmd); 2208 2209 /* Unload the map of the data buffer */ 2210 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 2211 dma_maps->dmamap_xfer->dm_mapsize, 2212 (dma_maps->dma_flags & WDC_DMA_READ) ? 
2213 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 2214 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 2215 2216 /* Clear status bits */ 2217 PCIIDE_DMACTL_WRITE(sc, channel, status); 2218 2219 if ((status & IDEDMA_CTL_ERR) != 0) { 2220 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 2221 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 2222 error |= WDC_DMAST_ERR; 2223 } 2224 2225 if ((status & IDEDMA_CTL_INTR) == 0) { 2226 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 2227 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 2228 drive, status); 2229 error |= WDC_DMAST_NOIRQ; 2230 } 2231 2232 if ((status & IDEDMA_CTL_ACT) != 0) { 2233 /* data underrun, may be a valid condition for ATAPI */ 2234 error |= WDC_DMAST_UNDER; 2235 } 2236 2237 done: 2238 sc->pciide_channels[channel].dma_in_progress = 0; 2239 return (error); 2240 } 2241 2242 void 2243 pciide_irqack(struct channel_softc *chp) 2244 { 2245 struct pciide_channel *cp = (struct pciide_channel *)chp; 2246 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2247 int chan = chp->channel; 2248 2249 /* clear status bits in IDE DMA registers */ 2250 PCIIDE_DMACTL_WRITE(sc, chan, PCIIDE_DMACTL_READ(sc, chan)); 2251 } 2252 2253 /* some common code used by several chip_map */ 2254 int 2255 pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface) 2256 { 2257 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2258 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2259 cp->name = PCIIDE_CHANNEL_NAME(channel); 2260 cp->wdc_channel.channel = channel; 2261 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2262 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 2263 if (cp->wdc_channel.ch_queue == NULL) { 2264 printf("%s: %s " 2265 "cannot allocate channel queue", 2266 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2267 return (0); 2268 } 2269 cp->hw_ok = 1; 2270 2271 return (1); 2272 } 2273 2274 void 2275 pciide_chanfree(struct pciide_softc *sc, int channel) 2276 { 2277 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2278 if (cp->wdc_channel.ch_queue) 2279 wdc_free_queue(cp->wdc_channel.ch_queue); 2280 } 2281 2282 /* some common code used by several chip channel_map */ 2283 void 2284 pciide_mapchan(struct pci_attach_args *pa, struct pciide_channel *cp, 2285 pcireg_t interface, bus_size_t *cmdsizep, bus_size_t *ctlsizep, 2286 int (*pci_intr)(void *)) 2287 { 2288 struct channel_softc *wdc_cp = &cp->wdc_channel; 2289 2290 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 2291 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 2292 pci_intr); 2293 else 2294 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2295 wdc_cp->channel, cmdsizep, ctlsizep); 2296 if (cp->hw_ok == 0) 2297 return; 2298 wdc_cp->data32iot = wdc_cp->cmd_iot; 2299 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 2300 wdcattach(wdc_cp); 2301 } 2302 2303 void 2304 pciide_unmap_chan(struct pciide_softc *sc, struct pciide_channel *cp, int flags) 2305 { 2306 struct channel_softc *wdc_cp = &cp->wdc_channel; 2307 2308 wdcdetach(wdc_cp, flags); 2309 2310 if (cp->compat != 0) 2311 pciide_unmapregs_compat(sc, cp); 2312 else 2313 pciide_unmapregs_native(sc, cp); 2314 } 2315 2316 /* 2317 * Generic code to call to know if a channel can be disabled. 
Return 1 2318 * if channel can be disabled, 0 if not 2319 */ 2320 int 2321 pciide_chan_candisable(struct pciide_channel *cp) 2322 { 2323 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2324 struct channel_softc *wdc_cp = &cp->wdc_channel; 2325 2326 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 2327 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 2328 printf("%s: %s disabled (no drives)\n", 2329 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2330 cp->hw_ok = 0; 2331 return (1); 2332 } 2333 return (0); 2334 } 2335 2336 /* 2337 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 2338 * Set hw_ok=0 on failure 2339 */ 2340 void 2341 pciide_map_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2342 int compatchan, int interface) 2343 { 2344 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2345 struct channel_softc *wdc_cp = &cp->wdc_channel; 2346 2347 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2348 return; 2349 2350 cp->compat = 1; 2351 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 2352 pa, compatchan, pciide_compat_intr, cp); 2353 if (cp->ih == NULL) { 2354 printf("%s: no compatibility interrupt for use by %s\n", 2355 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2356 cp->hw_ok = 0; 2357 } 2358 } 2359 2360 /* 2361 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel. 2362 * Set hw_ok=0 on failure 2363 */ 2364 void 2365 pciide_unmap_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2366 int compatchan, int interface) 2367 { 2368 struct channel_softc *wdc_cp = &cp->wdc_channel; 2369 2370 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2371 return; 2372 2373 pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih); 2374 } 2375 2376 void 2377 pciide_print_channels(int nchannels, pcireg_t interface) 2378 { 2379 int i; 2380 2381 for (i = 0; i < nchannels; i++) { 2382 printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i), 2383 (interface & PCIIDE_INTERFACE_SETTABLE(i)) ? 2384 "configured" : "wired", 2385 (interface & PCIIDE_INTERFACE_PCI(i)) ? 
"native-PCI" : 2386 "compatibility"); 2387 } 2388 2389 printf("\n"); 2390 } 2391 2392 void 2393 pciide_print_modes(struct pciide_channel *cp) 2394 { 2395 wdc_print_current_modes(&cp->wdc_channel); 2396 } 2397 2398 void 2399 default_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2400 { 2401 struct pciide_channel *cp; 2402 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2403 pcireg_t csr; 2404 int channel, drive; 2405 struct ata_drive_datas *drvp; 2406 u_int8_t idedma_ctl; 2407 bus_size_t cmdsize, ctlsize; 2408 char *failreason; 2409 2410 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2411 printf(": DMA"); 2412 if (sc->sc_pp == &default_product_desc && 2413 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 2414 PCIIDE_OPTIONS_DMA) == 0) { 2415 printf(" (unsupported)"); 2416 sc->sc_dma_ok = 0; 2417 } else { 2418 pciide_mapreg_dma(sc, pa); 2419 if (sc->sc_dma_ok != 0) 2420 printf(", (partial support)"); 2421 } 2422 } else { 2423 printf(": no DMA"); 2424 sc->sc_dma_ok = 0; 2425 } 2426 if (sc->sc_dma_ok) { 2427 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2428 sc->sc_wdcdev.irqack = pciide_irqack; 2429 } 2430 sc->sc_wdcdev.PIO_cap = 0; 2431 sc->sc_wdcdev.DMA_cap = 0; 2432 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2433 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2434 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 2435 2436 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2437 2438 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2439 cp = &sc->pciide_channels[channel]; 2440 if (pciide_chansetup(sc, channel, interface) == 0) 2441 continue; 2442 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 2443 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2444 &ctlsize, pciide_pci_intr); 2445 } else { 2446 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2447 channel, &cmdsize, &ctlsize); 2448 } 2449 if (cp->hw_ok == 0) 2450 continue; 2451 /* 2452 * Check to see if something appears to be there. 2453 */ 2454 failreason = NULL; 2455 pciide_map_compat_intr(pa, cp, channel, interface); 2456 if (cp->hw_ok == 0) 2457 continue; 2458 if (!wdcprobe(&cp->wdc_channel)) { 2459 failreason = "not responding; disabled or no drives?"; 2460 goto next; 2461 } 2462 /* 2463 * Now, make sure it's actually attributable to this PCI IDE 2464 * channel by trying to access the channel again while the 2465 * PCI IDE controller's I/O space is disabled. (If the 2466 * channel no longer appears to be there, it belongs to 2467 * this controller.) YUCK! 
2468 */ 2469 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 2470 PCI_COMMAND_STATUS_REG); 2471 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 2472 csr & ~PCI_COMMAND_IO_ENABLE); 2473 if (wdcprobe(&cp->wdc_channel)) 2474 failreason = "other hardware responding at addresses"; 2475 pci_conf_write(sc->sc_pc, sc->sc_tag, 2476 PCI_COMMAND_STATUS_REG, csr); 2477 next: 2478 if (failreason) { 2479 printf("%s: %s ignored (%s)\n", 2480 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2481 failreason); 2482 cp->hw_ok = 0; 2483 pciide_unmap_compat_intr(pa, cp, channel, interface); 2484 bus_space_unmap(cp->wdc_channel.cmd_iot, 2485 cp->wdc_channel.cmd_ioh, cmdsize); 2486 if (interface & PCIIDE_INTERFACE_PCI(channel)) 2487 bus_space_unmap(cp->wdc_channel.ctl_iot, 2488 cp->ctl_baseioh, ctlsize); 2489 else 2490 bus_space_unmap(cp->wdc_channel.ctl_iot, 2491 cp->wdc_channel.ctl_ioh, ctlsize); 2492 } 2493 if (cp->hw_ok) { 2494 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2495 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2496 wdcattach(&cp->wdc_channel); 2497 } 2498 } 2499 2500 if (sc->sc_dma_ok == 0) 2501 return; 2502 2503 /* Allocate DMA maps */ 2504 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2505 idedma_ctl = 0; 2506 cp = &sc->pciide_channels[channel]; 2507 for (drive = 0; drive < 2; drive++) { 2508 drvp = &cp->wdc_channel.ch_drive[drive]; 2509 /* If no drive, skip */ 2510 if ((drvp->drive_flags & DRIVE) == 0) 2511 continue; 2512 if ((drvp->drive_flags & DRIVE_DMA) == 0) 2513 continue; 2514 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 2515 /* Abort DMA setup */ 2516 printf("%s:%d:%d: cannot allocate DMA maps, " 2517 "using PIO transfers\n", 2518 sc->sc_wdcdev.sc_dev.dv_xname, 2519 channel, drive); 2520 drvp->drive_flags &= ~DRIVE_DMA; 2521 } 2522 printf("%s:%d:%d: using DMA data transfers\n", 2523 sc->sc_wdcdev.sc_dev.dv_xname, 2524 channel, drive); 2525 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2526 } 2527 if (idedma_ctl != 0) { 2528 /* Add software bits in status register */ 2529 PCIIDE_DMACTL_WRITE(sc, channel, idedma_ctl); 2530 } 2531 } 2532 } 2533 2534 void 2535 default_chip_unmap(struct pciide_softc *sc, int flags) 2536 { 2537 struct pciide_channel *cp; 2538 int channel; 2539 2540 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2541 cp = &sc->pciide_channels[channel]; 2542 pciide_unmap_chan(sc, cp, flags); 2543 pciide_chanfree(sc, channel); 2544 } 2545 2546 pciide_unmapreg_dma(sc); 2547 2548 if (sc->sc_cookie) 2549 free(sc->sc_cookie, M_DEVBUF, 0); 2550 } 2551 2552 void 2553 sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2554 { 2555 struct pciide_channel *cp; 2556 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2557 int channel; 2558 bus_size_t cmdsize, ctlsize; 2559 2560 if (interface == 0) { 2561 WDCDEBUG_PRINT(("sata_chip_map interface == 0\n"), 2562 DEBUG_PROBE); 2563 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2564 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 2565 } 2566 2567 printf(": DMA"); 2568 pciide_mapreg_dma(sc, pa); 2569 printf("\n"); 2570 2571 if (sc->sc_dma_ok) { 2572 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2573 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2574 sc->sc_wdcdev.irqack = pciide_irqack; 2575 } 2576 sc->sc_wdcdev.PIO_cap = 4; 2577 sc->sc_wdcdev.DMA_cap = 2; 2578 sc->sc_wdcdev.UDMA_cap = 6; 2579 2580 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2581 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2582 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2583 
WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2584 sc->sc_wdcdev.set_modes = sata_setup_channel; 2585 sc->chip_unmap = default_chip_unmap; 2586 2587 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2588 cp = &sc->pciide_channels[channel]; 2589 if (pciide_chansetup(sc, channel, interface) == 0) 2590 continue; 2591 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2592 pciide_pci_intr); 2593 sata_setup_channel(&cp->wdc_channel); 2594 } 2595 } 2596 2597 void 2598 sata_setup_channel(struct channel_softc *chp) 2599 { 2600 struct ata_drive_datas *drvp; 2601 int drive; 2602 u_int32_t idedma_ctl; 2603 struct pciide_channel *cp = (struct pciide_channel *)chp; 2604 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2605 2606 /* setup DMA if needed */ 2607 pciide_channel_dma_setup(cp); 2608 2609 idedma_ctl = 0; 2610 2611 for (drive = 0; drive < 2; drive++) { 2612 drvp = &chp->ch_drive[drive]; 2613 /* If no drive, skip */ 2614 if ((drvp->drive_flags & DRIVE) == 0) 2615 continue; 2616 if (drvp->drive_flags & DRIVE_UDMA) { 2617 /* use Ultra/DMA */ 2618 drvp->drive_flags &= ~DRIVE_DMA; 2619 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2620 } else if (drvp->drive_flags & DRIVE_DMA) { 2621 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2622 } 2623 } 2624 2625 /* 2626 * Nothing to do to setup modes; it is meaningless in S-ATA 2627 * (but many S-ATA drives still want to get the SET_FEATURE 2628 * command). 2629 */ 2630 if (idedma_ctl != 0) { 2631 /* Add software bits in status register */ 2632 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 2633 } 2634 pciide_print_modes(cp); 2635 } 2636 2637 void 2638 piix_timing_debug(struct pciide_softc *sc) 2639 { 2640 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 2641 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 2642 DEBUG_PROBE); 2643 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE && 2644 sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_ISA) { 2645 WDCDEBUG_PRINT((", sidetim=0x%x", 2646 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 2647 DEBUG_PROBE); 2648 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2649 WDCDEBUG_PRINT((", udmareg 0x%x", 2650 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 2651 DEBUG_PROBE); 2652 } 2653 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2654 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2655 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2656 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2657 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2658 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2659 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2660 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2661 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2662 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2663 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2664 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2665 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2666 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2667 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2668 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2669 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 2670 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 2671 DEBUG_PROBE); 2672 } 2673 } 2674 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 2675 } 2676 2677 void 2678 piix_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2679 { 
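	/*
	 * Illustrative sketch, not part of the driver: the PIIX code below
	 * keeps both channels' timings packed in the single PIIX_IDETIM
	 * configuration register and always modifies it read-modify-write,
	 * using the PIIX_IDETIM_* macros to address the per-channel half.
	 * A hypothetical helper that did nothing but enable IDE register
	 * decode for one channel would look roughly like this
	 * (piix_idetim_enable_decode() does not exist in this file and is
	 * shown disabled).
	 */
#if 0
static void
piix_idetim_enable_decode(struct pciide_softc *sc, int channel)
{
	u_int32_t idetim;

	/* fetch the packed primary/secondary timing register */
	idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	/* clear this channel's half, then set its decode enable bit */
	idetim = PIIX_IDETIM_CLEAR(idetim, 0xffff, channel);
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
}
#endif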
2680 struct pciide_channel *cp; 2681 int channel; 2682 u_int32_t idetim; 2683 bus_size_t cmdsize, ctlsize; 2684 2685 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2686 2687 printf(": DMA"); 2688 pciide_mapreg_dma(sc, pa); 2689 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2690 WDC_CAPABILITY_MODE; 2691 if (sc->sc_dma_ok) { 2692 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2693 sc->sc_wdcdev.irqack = pciide_irqack; 2694 switch (sc->sc_pp->ide_product) { 2695 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2696 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2697 case PCI_PRODUCT_INTEL_82371AB_IDE: 2698 case PCI_PRODUCT_INTEL_82372FB_IDE: 2699 case PCI_PRODUCT_INTEL_82440MX_IDE: 2700 case PCI_PRODUCT_INTEL_82451NX: 2701 case PCI_PRODUCT_INTEL_82801AA_IDE: 2702 case PCI_PRODUCT_INTEL_82801AB_IDE: 2703 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2704 case PCI_PRODUCT_INTEL_82801BA_IDE: 2705 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2706 case PCI_PRODUCT_INTEL_82801CA_IDE: 2707 case PCI_PRODUCT_INTEL_82801DB_IDE: 2708 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2709 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2710 case PCI_PRODUCT_INTEL_82801EB_IDE: 2711 case PCI_PRODUCT_INTEL_82801FB_IDE: 2712 case PCI_PRODUCT_INTEL_82801GB_IDE: 2713 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2714 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2715 break; 2716 } 2717 } 2718 sc->sc_wdcdev.PIO_cap = 4; 2719 sc->sc_wdcdev.DMA_cap = 2; 2720 switch (sc->sc_pp->ide_product) { 2721 case PCI_PRODUCT_INTEL_82801AA_IDE: 2722 case PCI_PRODUCT_INTEL_82372FB_IDE: 2723 sc->sc_wdcdev.UDMA_cap = 4; 2724 break; 2725 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2726 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2727 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2728 case PCI_PRODUCT_INTEL_82801BA_IDE: 2729 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2730 case PCI_PRODUCT_INTEL_82801CA_IDE: 2731 case PCI_PRODUCT_INTEL_82801DB_IDE: 2732 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2733 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2734 case PCI_PRODUCT_INTEL_82801EB_IDE: 2735 case PCI_PRODUCT_INTEL_82801FB_IDE: 2736 case PCI_PRODUCT_INTEL_82801GB_IDE: 2737 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2738 sc->sc_wdcdev.UDMA_cap = 5; 2739 break; 2740 default: 2741 sc->sc_wdcdev.UDMA_cap = 2; 2742 break; 2743 } 2744 2745 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE || 2746 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_ISA) { 2747 sc->sc_wdcdev.set_modes = piix_setup_channel; 2748 } else { 2749 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 2750 } 2751 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2752 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2753 2754 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2755 2756 piix_timing_debug(sc); 2757 2758 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2759 cp = &sc->pciide_channels[channel]; 2760 2761 if (pciide_chansetup(sc, channel, interface) == 0) 2762 continue; 2763 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2764 if ((PIIX_IDETIM_READ(idetim, channel) & 2765 PIIX_IDETIM_IDE) == 0) { 2766 printf("%s: %s ignored (disabled)\n", 2767 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2768 continue; 2769 } 2770 pciide_map_compat_intr(pa, cp, channel, interface); 2771 if (cp->hw_ok == 0) 2772 continue; 2773 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2774 pciide_pci_intr); 2775 if (cp->hw_ok == 0) 2776 goto next; 2777 if (pciide_chan_candisable(cp)) { 2778 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 2779 channel); 2780 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 2781 idetim); 2782 
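			/*
			 * pciide_chan_candisable() reported no drives on
			 * this channel, so the PIIX_IDETIM_IDE decode
			 * enable bit for the channel has just been cleared
			 * in the value written back above.
			 */
#if 0
			/*
			 * Illustrative check only, not in the driver: a
			 * read-back should now show the bit clear.
			 */
			idetim = pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PIIX_IDETIM);
			KASSERT((PIIX_IDETIM_READ(idetim, channel) &
			    PIIX_IDETIM_IDE) == 0);
#endif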
} 2783 if (cp->hw_ok == 0) 2784 goto next; 2785 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2786 next: 2787 if (cp->hw_ok == 0) 2788 pciide_unmap_compat_intr(pa, cp, channel, interface); 2789 } 2790 2791 piix_timing_debug(sc); 2792 } 2793 2794 void 2795 piixsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2796 { 2797 struct pciide_channel *cp; 2798 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2799 int channel; 2800 bus_size_t cmdsize, ctlsize; 2801 u_int8_t reg, ich = 0; 2802 2803 printf(": DMA"); 2804 pciide_mapreg_dma(sc, pa); 2805 2806 if (sc->sc_dma_ok) { 2807 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2808 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2809 sc->sc_wdcdev.irqack = pciide_irqack; 2810 sc->sc_wdcdev.DMA_cap = 2; 2811 sc->sc_wdcdev.UDMA_cap = 6; 2812 } 2813 sc->sc_wdcdev.PIO_cap = 4; 2814 2815 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2816 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2817 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2818 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2819 sc->sc_wdcdev.set_modes = sata_setup_channel; 2820 2821 switch(sc->sc_pp->ide_product) { 2822 case PCI_PRODUCT_INTEL_6300ESB_SATA: 2823 case PCI_PRODUCT_INTEL_6300ESB_SATA2: 2824 case PCI_PRODUCT_INTEL_82801EB_SATA: 2825 case PCI_PRODUCT_INTEL_82801ER_SATA: 2826 ich = 5; 2827 break; 2828 case PCI_PRODUCT_INTEL_82801FB_SATA: 2829 case PCI_PRODUCT_INTEL_82801FR_SATA: 2830 case PCI_PRODUCT_INTEL_82801FBM_SATA: 2831 ich = 6; 2832 break; 2833 default: 2834 ich = 7; 2835 break; 2836 } 2837 2838 /* 2839 * Put the SATA portion of controllers that don't operate in combined 2840 * mode into native PCI modes so the maximum number of devices can be 2841 * used. Intel calls this "enhanced mode" 2842 */ 2843 if (ich == 5) { 2844 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP); 2845 if ((reg & ICH5_SATA_MAP_COMBINED) == 0) { 2846 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2847 ICH5_SATA_PI); 2848 reg |= ICH5_SATA_PI_PRI_NATIVE | 2849 ICH5_SATA_PI_SEC_NATIVE; 2850 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2851 ICH5_SATA_PI, reg); 2852 interface |= PCIIDE_INTERFACE_PCI(0) | 2853 PCIIDE_INTERFACE_PCI(1); 2854 } 2855 } else { 2856 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP) & 2857 ICH6_SATA_MAP_CMB_MASK; 2858 if (reg != ICH6_SATA_MAP_CMB_PRI && 2859 reg != ICH6_SATA_MAP_CMB_SEC) { 2860 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2861 ICH5_SATA_PI); 2862 reg |= ICH5_SATA_PI_PRI_NATIVE | 2863 ICH5_SATA_PI_SEC_NATIVE; 2864 2865 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2866 ICH5_SATA_PI, reg); 2867 interface |= PCIIDE_INTERFACE_PCI(0) | 2868 PCIIDE_INTERFACE_PCI(1); 2869 2870 /* 2871 * Ask for SATA IDE Mode, we don't need to do this 2872 * for the combined mode case as combined mode is 2873 * only allowed in IDE Mode 2874 */ 2875 if (ich >= 7) { 2876 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 2877 ICH5_SATA_MAP) & ~ICH7_SATA_MAP_SMS_MASK; 2878 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2879 ICH5_SATA_MAP, reg); 2880 } 2881 } 2882 } 2883 2884 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2885 2886 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2887 cp = &sc->pciide_channels[channel]; 2888 if (pciide_chansetup(sc, channel, interface) == 0) 2889 continue; 2890 2891 pciide_map_compat_intr(pa, cp, channel, interface); 2892 if (cp->hw_ok == 0) 2893 continue; 2894 2895 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2896 pciide_pci_intr); 2897 if (cp->hw_ok != 0) 2898 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 
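		/*
		 * set_modes points at sata_setup_channel() here; as that
		 * function's comment notes, there are no transfer timings
		 * to program for SATA, so the call mainly arms the
		 * per-drive DMA bits in the bus-master status register and
		 * prints the negotiated modes.
		 */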

		if (cp->hw_ok == 0)
			pciide_unmap_compat_intr(pa, cp, channel, interface);
	}
}

void
piix_setup_channel(struct channel_softc *chp)
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: enable IDE register decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess with the drive modes: the PIIX can't have
	 * different timings for the master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives support DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		drvp[0].DMA_mode = mode[0];
		drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if the modes are not compatible.
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If neither drive uses DMA, take the lower PIO mode, unless
	 * one of them is below PIO mode 2.
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are now set up */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we get here, neither drive is using DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller.
Enable 2993 * it per-drive 2994 */ 2995 for (drive = 0; drive < 2; drive++) { 2996 /* If no drive, skip */ 2997 if ((drvp[drive].drive_flags & DRIVE) == 0) 2998 continue; 2999 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 3000 if (drvp[drive].drive_flags & DRIVE_DMA) 3001 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3002 } 3003 if (idedma_ctl != 0) { 3004 /* Add software bits in status register */ 3005 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3006 IDEDMA_CTL(chp->channel), 3007 idedma_ctl); 3008 } 3009 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 3010 pciide_print_modes(cp); 3011 } 3012 3013 void 3014 piix3_4_setup_channel(struct channel_softc *chp) 3015 { 3016 struct ata_drive_datas *drvp; 3017 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 3018 struct pciide_channel *cp = (struct pciide_channel *)chp; 3019 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3020 int drive; 3021 int channel = chp->channel; 3022 3023 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 3024 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 3025 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 3026 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 3027 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 3028 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 3029 PIIX_SIDETIM_RTC_MASK(channel)); 3030 3031 idedma_ctl = 0; 3032 /* If channel disabled, no need to go further */ 3033 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 3034 return; 3035 /* set up new idetim: Enable IDE registers decode */ 3036 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 3037 3038 /* setup DMA if needed */ 3039 pciide_channel_dma_setup(cp); 3040 3041 for (drive = 0; drive < 2; drive++) { 3042 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 3043 PIIX_UDMATIM_SET(0x3, channel, drive)); 3044 drvp = &chp->ch_drive[drive]; 3045 /* If no drive, skip */ 3046 if ((drvp->drive_flags & DRIVE) == 0) 3047 continue; 3048 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3049 (drvp->drive_flags & DRIVE_UDMA) == 0)) 3050 goto pio; 3051 3052 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 3053 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 3054 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 3055 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 3056 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 3057 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 3058 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 3059 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 3060 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 3061 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 3062 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 3063 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 3064 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 3065 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 3066 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 3067 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 3068 ideconf |= PIIX_CONFIG_PINGPONG; 3069 } 3070 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 3071 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 3072 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 3073 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 3074 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 3075 
sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 3076 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 3077 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 3078 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 3079 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 3080 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 3081 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 3082 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) { 3083 /* setup Ultra/100 */ 3084 if (drvp->UDMA_mode > 2 && 3085 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 3086 drvp->UDMA_mode = 2; 3087 if (drvp->UDMA_mode > 4) { 3088 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 3089 } else { 3090 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 3091 if (drvp->UDMA_mode > 2) { 3092 ideconf |= PIIX_CONFIG_UDMA66(channel, 3093 drive); 3094 } else { 3095 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 3096 drive); 3097 } 3098 } 3099 } 3100 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 3101 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 3102 /* setup Ultra/66 */ 3103 if (drvp->UDMA_mode > 2 && 3104 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 3105 drvp->UDMA_mode = 2; 3106 if (drvp->UDMA_mode > 2) 3107 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 3108 else 3109 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 3110 } 3111 3112 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3113 (drvp->drive_flags & DRIVE_UDMA)) { 3114 /* use Ultra/DMA */ 3115 drvp->drive_flags &= ~DRIVE_DMA; 3116 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 3117 udmareg |= PIIX_UDMATIM_SET( 3118 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 3119 } else { 3120 /* use Multiword DMA */ 3121 drvp->drive_flags &= ~DRIVE_UDMA; 3122 if (drive == 0) { 3123 idetim |= piix_setup_idetim_timings( 3124 drvp->DMA_mode, 1, channel); 3125 } else { 3126 sidetim |= piix_setup_sidetim_timings( 3127 drvp->DMA_mode, 1, channel); 3128 idetim = PIIX_IDETIM_SET(idetim, 3129 PIIX_IDETIM_SITRE, channel); 3130 } 3131 } 3132 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3133 3134 pio: /* use PIO mode */ 3135 idetim |= piix_setup_idetim_drvs(drvp); 3136 if (drive == 0) { 3137 idetim |= piix_setup_idetim_timings( 3138 drvp->PIO_mode, 0, channel); 3139 } else { 3140 sidetim |= piix_setup_sidetim_timings( 3141 drvp->PIO_mode, 0, channel); 3142 idetim = PIIX_IDETIM_SET(idetim, 3143 PIIX_IDETIM_SITRE, channel); 3144 } 3145 } 3146 if (idedma_ctl != 0) { 3147 /* Add software bits in status register */ 3148 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3149 IDEDMA_CTL(channel), 3150 idedma_ctl); 3151 } 3152 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 3153 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 3154 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 3155 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 3156 pciide_print_modes(cp); 3157 } 3158 3159 3160 /* setup ISP and RTC fields, based on mode */ 3161 u_int32_t 3162 piix_setup_idetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 3163 { 3164 3165 if (dma) 3166 return (PIIX_IDETIM_SET(0, 3167 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 3168 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 3169 channel)); 3170 else 3171 return (PIIX_IDETIM_SET(0, 3172 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 3173 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 3174 channel)); 3175 } 3176 3177 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 3178 u_int32_t 3179 piix_setup_idetim_drvs(struct 
ata_drive_datas *drvp) 3180 { 3181 u_int32_t ret = 0; 3182 struct channel_softc *chp = drvp->chnl_softc; 3183 u_int8_t channel = chp->channel; 3184 u_int8_t drive = drvp->drive; 3185 3186 /* 3187 * If drive is using UDMA, timings setups are independant 3188 * So just check DMA and PIO here. 3189 */ 3190 if (drvp->drive_flags & DRIVE_DMA) { 3191 /* if mode = DMA mode 0, use compatible timings */ 3192 if ((drvp->drive_flags & DRIVE_DMA) && 3193 drvp->DMA_mode == 0) { 3194 drvp->PIO_mode = 0; 3195 return (ret); 3196 } 3197 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 3198 /* 3199 * PIO and DMA timings are the same, use fast timings for PIO 3200 * too, else use compat timings. 3201 */ 3202 if ((piix_isp_pio[drvp->PIO_mode] != 3203 piix_isp_dma[drvp->DMA_mode]) || 3204 (piix_rtc_pio[drvp->PIO_mode] != 3205 piix_rtc_dma[drvp->DMA_mode])) 3206 drvp->PIO_mode = 0; 3207 /* if PIO mode <= 2, use compat timings for PIO */ 3208 if (drvp->PIO_mode <= 2) { 3209 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 3210 channel); 3211 return (ret); 3212 } 3213 } 3214 3215 /* 3216 * Now setup PIO modes. If mode < 2, use compat timings. 3217 * Else enable fast timings. Enable IORDY and prefetch/post 3218 * if PIO mode >= 3. 3219 */ 3220 3221 if (drvp->PIO_mode < 2) 3222 return (ret); 3223 3224 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 3225 if (drvp->PIO_mode >= 3) { 3226 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 3227 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 3228 } 3229 return (ret); 3230 } 3231 3232 /* setup values in SIDETIM registers, based on mode */ 3233 u_int32_t 3234 piix_setup_sidetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 3235 { 3236 if (dma) 3237 return (PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 3238 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel)); 3239 else 3240 return (PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 3241 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel)); 3242 } 3243 3244 void 3245 amd756_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3246 { 3247 struct pciide_channel *cp; 3248 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3249 int channel; 3250 pcireg_t chanenable; 3251 bus_size_t cmdsize, ctlsize; 3252 3253 printf(": DMA"); 3254 pciide_mapreg_dma(sc, pa); 3255 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3256 WDC_CAPABILITY_MODE; 3257 if (sc->sc_dma_ok) { 3258 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3259 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3260 sc->sc_wdcdev.irqack = pciide_irqack; 3261 } 3262 sc->sc_wdcdev.PIO_cap = 4; 3263 sc->sc_wdcdev.DMA_cap = 2; 3264 switch (sc->sc_pp->ide_product) { 3265 case PCI_PRODUCT_AMD_8111_IDE: 3266 sc->sc_wdcdev.UDMA_cap = 6; 3267 break; 3268 case PCI_PRODUCT_AMD_766_IDE: 3269 case PCI_PRODUCT_AMD_PBC768_IDE: 3270 sc->sc_wdcdev.UDMA_cap = 5; 3271 break; 3272 default: 3273 sc->sc_wdcdev.UDMA_cap = 4; 3274 break; 3275 } 3276 sc->sc_wdcdev.set_modes = amd756_setup_channel; 3277 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3278 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3279 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 3280 3281 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3282 3283 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3284 cp = &sc->pciide_channels[channel]; 3285 if (pciide_chansetup(sc, channel, interface) == 0) 3286 continue; 3287 3288 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 3289 
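			/*
			 * chanenable was read from the AMD756_CHANSTATUS_EN
			 * configuration register above; a clear
			 * AMD756_CHAN_EN bit means the channel is disabled
			 * in hardware, so it is reported and skipped.
			 */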
printf("%s: %s ignored (disabled)\n", 3290 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3291 continue; 3292 } 3293 pciide_map_compat_intr(pa, cp, channel, interface); 3294 if (cp->hw_ok == 0) 3295 continue; 3296 3297 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3298 pciide_pci_intr); 3299 3300 if (pciide_chan_candisable(cp)) { 3301 chanenable &= ~AMD756_CHAN_EN(channel); 3302 } 3303 if (cp->hw_ok == 0) { 3304 pciide_unmap_compat_intr(pa, cp, channel, interface); 3305 continue; 3306 } 3307 3308 amd756_setup_channel(&cp->wdc_channel); 3309 } 3310 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 3311 chanenable); 3312 return; 3313 } 3314 3315 void 3316 amd756_setup_channel(struct channel_softc *chp) 3317 { 3318 u_int32_t udmatim_reg, datatim_reg; 3319 u_int8_t idedma_ctl; 3320 int mode, drive; 3321 struct ata_drive_datas *drvp; 3322 struct pciide_channel *cp = (struct pciide_channel *)chp; 3323 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3324 pcireg_t chanenable; 3325 #ifndef PCIIDE_AMD756_ENABLEDMA 3326 int product = sc->sc_pp->ide_product; 3327 int rev = sc->sc_rev; 3328 #endif 3329 3330 idedma_ctl = 0; 3331 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 3332 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 3333 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 3334 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 3335 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 3336 AMD756_CHANSTATUS_EN); 3337 3338 /* setup DMA if needed */ 3339 pciide_channel_dma_setup(cp); 3340 3341 for (drive = 0; drive < 2; drive++) { 3342 drvp = &chp->ch_drive[drive]; 3343 /* If no drive, skip */ 3344 if ((drvp->drive_flags & DRIVE) == 0) 3345 continue; 3346 /* add timing values, setup DMA if needed */ 3347 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3348 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3349 mode = drvp->PIO_mode; 3350 goto pio; 3351 } 3352 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3353 (drvp->drive_flags & DRIVE_UDMA)) { 3354 /* use Ultra/DMA */ 3355 drvp->drive_flags &= ~DRIVE_DMA; 3356 3357 /* Check cable */ 3358 if ((chanenable & AMD756_CABLE(chp->channel, 3359 drive)) == 0 && drvp->UDMA_mode > 2) { 3360 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 3361 "cable not detected\n", drvp->drive_name, 3362 sc->sc_wdcdev.sc_dev.dv_xname, 3363 chp->channel, drive), DEBUG_PROBE); 3364 drvp->UDMA_mode = 2; 3365 } 3366 3367 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 3368 AMD756_UDMA_EN_MTH(chp->channel, drive) | 3369 AMD756_UDMA_TIME(chp->channel, drive, 3370 amd756_udma_tim[drvp->UDMA_mode]); 3371 /* can use PIO timings, MW DMA unused */ 3372 mode = drvp->PIO_mode; 3373 } else { 3374 /* use Multiword DMA, but only if revision is OK */ 3375 drvp->drive_flags &= ~DRIVE_UDMA; 3376 #ifndef PCIIDE_AMD756_ENABLEDMA 3377 /* 3378 * The workaround doesn't seem to be necessary 3379 * with all drives, so it can be disabled by 3380 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 3381 * triggered. 
3382 */ 3383 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 3384 printf("%s:%d:%d: multi-word DMA disabled due " 3385 "to chip revision\n", 3386 sc->sc_wdcdev.sc_dev.dv_xname, 3387 chp->channel, drive); 3388 mode = drvp->PIO_mode; 3389 drvp->drive_flags &= ~DRIVE_DMA; 3390 goto pio; 3391 } 3392 #endif 3393 /* mode = min(pio, dma+2) */ 3394 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3395 mode = drvp->PIO_mode; 3396 else 3397 mode = drvp->DMA_mode + 2; 3398 } 3399 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3400 3401 pio: /* setup PIO mode */ 3402 if (mode <= 2) { 3403 drvp->DMA_mode = 0; 3404 drvp->PIO_mode = 0; 3405 mode = 0; 3406 } else { 3407 drvp->PIO_mode = mode; 3408 drvp->DMA_mode = mode - 2; 3409 } 3410 datatim_reg |= 3411 AMD756_DATATIM_PULSE(chp->channel, drive, 3412 amd756_pio_set[mode]) | 3413 AMD756_DATATIM_RECOV(chp->channel, drive, 3414 amd756_pio_rec[mode]); 3415 } 3416 if (idedma_ctl != 0) { 3417 /* Add software bits in status register */ 3418 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3419 IDEDMA_CTL(chp->channel), 3420 idedma_ctl); 3421 } 3422 pciide_print_modes(cp); 3423 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 3424 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 3425 } 3426 3427 void 3428 apollo_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3429 { 3430 struct pciide_channel *cp; 3431 pcireg_t interface; 3432 int no_ideconf = 0, channel; 3433 u_int32_t ideconf; 3434 bus_size_t cmdsize, ctlsize; 3435 pcitag_t tag; 3436 pcireg_t id, class; 3437 3438 /* 3439 * Fake interface since VT6410 is claimed to be a ``RAID'' device. 3440 */ 3441 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3442 interface = PCI_INTERFACE(pa->pa_class); 3443 } else { 3444 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3445 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3446 } 3447 3448 switch (PCI_PRODUCT(pa->pa_id)) { 3449 case PCI_PRODUCT_VIATECH_VT6410: 3450 case PCI_PRODUCT_VIATECH_VT6415: 3451 no_ideconf = 1; 3452 /* FALLTHROUGH */ 3453 case PCI_PRODUCT_VIATECH_CX700_IDE: 3454 case PCI_PRODUCT_VIATECH_VX700_IDE: 3455 case PCI_PRODUCT_VIATECH_VX855_IDE: 3456 case PCI_PRODUCT_VIATECH_VX900_IDE: 3457 printf(": ATA133"); 3458 sc->sc_wdcdev.UDMA_cap = 6; 3459 break; 3460 default: 3461 /* 3462 * Determine the DMA capabilities by looking at the 3463 * ISA bridge. 3464 */ 3465 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 3466 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3467 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3468 3469 /* 3470 * XXX On the VT8237, the ISA bridge is on a different 3471 * device. 
3472 */ 3473 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE && 3474 pa->pa_device == 15) { 3475 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 17, 0); 3476 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3477 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3478 } 3479 3480 switch (PCI_PRODUCT(id)) { 3481 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 3482 if (PCI_REVISION(class) >= 0x02) { 3483 printf(": ATA33"); 3484 sc->sc_wdcdev.UDMA_cap = 2; 3485 } else { 3486 printf(": DMA"); 3487 sc->sc_wdcdev.UDMA_cap = 0; 3488 } 3489 break; 3490 case PCI_PRODUCT_VIATECH_VT82C596A: 3491 if (PCI_REVISION(class) >= 0x12) { 3492 printf(": ATA66"); 3493 sc->sc_wdcdev.UDMA_cap = 4; 3494 } else { 3495 printf(": ATA33"); 3496 sc->sc_wdcdev.UDMA_cap = 2; 3497 } 3498 break; 3499 3500 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 3501 if (PCI_REVISION(class) >= 0x40) { 3502 printf(": ATA100"); 3503 sc->sc_wdcdev.UDMA_cap = 5; 3504 } else { 3505 printf(": ATA66"); 3506 sc->sc_wdcdev.UDMA_cap = 4; 3507 } 3508 break; 3509 case PCI_PRODUCT_VIATECH_VT8231_ISA: 3510 case PCI_PRODUCT_VIATECH_VT8233_ISA: 3511 printf(": ATA100"); 3512 sc->sc_wdcdev.UDMA_cap = 5; 3513 break; 3514 case PCI_PRODUCT_VIATECH_VT8233A_ISA: 3515 case PCI_PRODUCT_VIATECH_VT8235_ISA: 3516 case PCI_PRODUCT_VIATECH_VT8237_ISA: 3517 printf(": ATA133"); 3518 sc->sc_wdcdev.UDMA_cap = 6; 3519 break; 3520 default: 3521 printf(": DMA"); 3522 sc->sc_wdcdev.UDMA_cap = 0; 3523 break; 3524 } 3525 break; 3526 } 3527 3528 pciide_mapreg_dma(sc, pa); 3529 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3530 WDC_CAPABILITY_MODE; 3531 if (sc->sc_dma_ok) { 3532 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3533 sc->sc_wdcdev.irqack = pciide_irqack; 3534 if (sc->sc_wdcdev.UDMA_cap > 0) 3535 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3536 } 3537 sc->sc_wdcdev.PIO_cap = 4; 3538 sc->sc_wdcdev.DMA_cap = 2; 3539 sc->sc_wdcdev.set_modes = apollo_setup_channel; 3540 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3541 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3542 3543 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3544 3545 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 3546 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3547 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 3548 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 3549 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3550 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 3551 DEBUG_PROBE); 3552 3553 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3554 cp = &sc->pciide_channels[channel]; 3555 if (pciide_chansetup(sc, channel, interface) == 0) 3556 continue; 3557 3558 if (no_ideconf == 0) { 3559 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, 3560 APO_IDECONF); 3561 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 3562 printf("%s: %s ignored (disabled)\n", 3563 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3564 continue; 3565 } 3566 } 3567 pciide_map_compat_intr(pa, cp, channel, interface); 3568 if (cp->hw_ok == 0) 3569 continue; 3570 3571 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3572 pciide_pci_intr); 3573 if (cp->hw_ok == 0) { 3574 goto next; 3575 } 3576 if (pciide_chan_candisable(cp)) { 3577 if (no_ideconf == 0) { 3578 ideconf &= ~APO_IDECONF_EN(channel); 3579 pci_conf_write(sc->sc_pc, sc->sc_tag, 3580 APO_IDECONF, ideconf); 3581 } 3582 } 3583 3584 if (cp->hw_ok == 0) 3585 goto next; 3586 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 3587 next: 3588 if (cp->hw_ok == 0) 3589 pciide_unmap_compat_intr(pa, cp, 
channel, interface); 3590 } 3591 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3592 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3593 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 3594 } 3595 3596 void 3597 apollo_setup_channel(struct channel_softc *chp) 3598 { 3599 u_int32_t udmatim_reg, datatim_reg; 3600 u_int8_t idedma_ctl; 3601 int mode, drive; 3602 struct ata_drive_datas *drvp; 3603 struct pciide_channel *cp = (struct pciide_channel *)chp; 3604 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3605 3606 idedma_ctl = 0; 3607 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 3608 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 3609 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 3610 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 3611 3612 /* setup DMA if needed */ 3613 pciide_channel_dma_setup(cp); 3614 3615 /* 3616 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 3617 * downgrade to Ultra/33 if needed 3618 */ 3619 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3620 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 3621 /* both drives UDMA */ 3622 if (chp->ch_drive[0].UDMA_mode > 2 && 3623 chp->ch_drive[1].UDMA_mode <= 2) { 3624 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 3625 chp->ch_drive[0].UDMA_mode = 2; 3626 } else if (chp->ch_drive[1].UDMA_mode > 2 && 3627 chp->ch_drive[0].UDMA_mode <= 2) { 3628 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 3629 chp->ch_drive[1].UDMA_mode = 2; 3630 } 3631 } 3632 3633 for (drive = 0; drive < 2; drive++) { 3634 drvp = &chp->ch_drive[drive]; 3635 /* If no drive, skip */ 3636 if ((drvp->drive_flags & DRIVE) == 0) 3637 continue; 3638 /* add timing values, setup DMA if needed */ 3639 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3640 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3641 mode = drvp->PIO_mode; 3642 goto pio; 3643 } 3644 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3645 (drvp->drive_flags & DRIVE_UDMA)) { 3646 /* use Ultra/DMA */ 3647 drvp->drive_flags &= ~DRIVE_DMA; 3648 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 3649 APO_UDMA_EN_MTH(chp->channel, drive); 3650 if (sc->sc_wdcdev.UDMA_cap == 6) { 3651 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3652 drive, apollo_udma133_tim[drvp->UDMA_mode]); 3653 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 3654 /* 686b */ 3655 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3656 drive, apollo_udma100_tim[drvp->UDMA_mode]); 3657 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 3658 /* 596b or 686a */ 3659 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 3660 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3661 drive, apollo_udma66_tim[drvp->UDMA_mode]); 3662 } else { 3663 /* 596a or 586b */ 3664 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3665 drive, apollo_udma33_tim[drvp->UDMA_mode]); 3666 } 3667 /* can use PIO timings, MW DMA unused */ 3668 mode = drvp->PIO_mode; 3669 } else { 3670 /* use Multiword DMA */ 3671 drvp->drive_flags &= ~DRIVE_UDMA; 3672 /* mode = min(pio, dma+2) */ 3673 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3674 mode = drvp->PIO_mode; 3675 else 3676 mode = drvp->DMA_mode + 2; 3677 } 3678 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3679 3680 pio: /* setup PIO mode */ 3681 if (mode <= 2) { 3682 drvp->DMA_mode = 0; 3683 drvp->PIO_mode = 0; 3684 mode = 0; 3685 } else { 3686 drvp->PIO_mode = mode; 3687 drvp->DMA_mode = mode - 2; 3688 } 3689 datatim_reg |= 3690 APO_DATATIM_PULSE(chp->channel, drive, 3691 apollo_pio_set[mode]) | 3692 APO_DATATIM_RECOV(chp->channel, drive, 3693 apollo_pio_rec[mode]); 3694 } 3695 if 
(idedma_ctl != 0) { 3696 /* Add software bits in status register */ 3697 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3698 IDEDMA_CTL(chp->channel), 3699 idedma_ctl); 3700 } 3701 pciide_print_modes(cp); 3702 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 3703 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 3704 } 3705 3706 void 3707 cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3708 int channel) 3709 { 3710 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3711 bus_size_t cmdsize, ctlsize; 3712 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 3713 pcireg_t interface; 3714 int one_channel; 3715 3716 /* 3717 * The 0648/0649 can be told to identify as a RAID controller. 3718 * In this case, we have to fake interface 3719 */ 3720 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3721 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3722 PCIIDE_INTERFACE_SETTABLE(1); 3723 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3724 CMD_CONF_DSA1) 3725 interface |= PCIIDE_INTERFACE_PCI(0) | 3726 PCIIDE_INTERFACE_PCI(1); 3727 } else { 3728 interface = PCI_INTERFACE(pa->pa_class); 3729 } 3730 3731 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3732 cp->name = PCIIDE_CHANNEL_NAME(channel); 3733 cp->wdc_channel.channel = channel; 3734 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3735 3736 /* 3737 * Older CMD64X doesn't have independant channels 3738 */ 3739 switch (sc->sc_pp->ide_product) { 3740 case PCI_PRODUCT_CMDTECH_649: 3741 one_channel = 0; 3742 break; 3743 default: 3744 one_channel = 1; 3745 break; 3746 } 3747 3748 if (channel > 0 && one_channel) { 3749 cp->wdc_channel.ch_queue = 3750 sc->pciide_channels[0].wdc_channel.ch_queue; 3751 } else { 3752 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 3753 } 3754 if (cp->wdc_channel.ch_queue == NULL) { 3755 printf( 3756 "%s: %s cannot allocate channel queue", 3757 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3758 return; 3759 } 3760 3761 /* 3762 * with a CMD PCI64x, if we get here, the first channel is enabled: 3763 * there's no way to disable the first channel without disabling 3764 * the whole device 3765 */ 3766 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 3767 printf("%s: %s ignored (disabled)\n", 3768 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3769 return; 3770 } 3771 cp->hw_ok = 1; 3772 pciide_map_compat_intr(pa, cp, channel, interface); 3773 if (cp->hw_ok == 0) 3774 return; 3775 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 3776 if (cp->hw_ok == 0) { 3777 pciide_unmap_compat_intr(pa, cp, channel, interface); 3778 return; 3779 } 3780 if (pciide_chan_candisable(cp)) { 3781 if (channel == 1) { 3782 ctrl &= ~CMD_CTRL_2PORT; 3783 pciide_pci_write(pa->pa_pc, pa->pa_tag, 3784 CMD_CTRL, ctrl); 3785 pciide_unmap_compat_intr(pa, cp, channel, interface); 3786 } 3787 } 3788 } 3789 3790 int 3791 cmd_pci_intr(void *arg) 3792 { 3793 struct pciide_softc *sc = arg; 3794 struct pciide_channel *cp; 3795 struct channel_softc *wdc_cp; 3796 int i, rv, crv; 3797 u_int32_t priirq, secirq; 3798 3799 rv = 0; 3800 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3801 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3802 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3803 cp = &sc->pciide_channels[i]; 3804 wdc_cp = &cp->wdc_channel; 3805 /* If a compat channel skip. 
*/ 3806 if (cp->compat) 3807 continue; 3808 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 3809 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 3810 crv = wdcintr(wdc_cp); 3811 if (crv == 0) { 3812 #if 0 3813 printf("%s:%d: bogus intr\n", 3814 sc->sc_wdcdev.sc_dev.dv_xname, i); 3815 #endif 3816 } else 3817 rv = 1; 3818 } 3819 } 3820 return (rv); 3821 } 3822 3823 void 3824 cmd_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3825 { 3826 int channel; 3827 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3828 3829 printf(": no DMA"); 3830 sc->sc_dma_ok = 0; 3831 3832 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3833 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3834 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3835 3836 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3837 3838 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3839 cmd_channel_map(pa, sc, channel); 3840 } 3841 } 3842 3843 void 3844 cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3845 { 3846 struct pciide_channel *cp; 3847 int channel; 3848 int rev = sc->sc_rev; 3849 pcireg_t interface; 3850 3851 /* 3852 * The 0648/0649 can be told to identify as a RAID controller. 3853 * In this case, we have to fake interface 3854 */ 3855 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3856 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3857 PCIIDE_INTERFACE_SETTABLE(1); 3858 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3859 CMD_CONF_DSA1) 3860 interface |= PCIIDE_INTERFACE_PCI(0) | 3861 PCIIDE_INTERFACE_PCI(1); 3862 } else { 3863 interface = PCI_INTERFACE(pa->pa_class); 3864 } 3865 3866 printf(": DMA"); 3867 pciide_mapreg_dma(sc, pa); 3868 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3869 WDC_CAPABILITY_MODE; 3870 if (sc->sc_dma_ok) { 3871 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3872 switch (sc->sc_pp->ide_product) { 3873 case PCI_PRODUCT_CMDTECH_649: 3874 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3875 sc->sc_wdcdev.UDMA_cap = 5; 3876 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3877 break; 3878 case PCI_PRODUCT_CMDTECH_648: 3879 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3880 sc->sc_wdcdev.UDMA_cap = 4; 3881 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3882 break; 3883 case PCI_PRODUCT_CMDTECH_646: 3884 if (rev >= CMD0646U2_REV) { 3885 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3886 sc->sc_wdcdev.UDMA_cap = 2; 3887 } else if (rev >= CMD0646U_REV) { 3888 /* 3889 * Linux's driver claims that the 646U is broken 3890 * with UDMA. 
Only enable it if we know what we're 3891 * doing 3892 */ 3893 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 3894 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3895 sc->sc_wdcdev.UDMA_cap = 2; 3896 #endif 3897 /* explicitly disable UDMA */ 3898 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3899 CMD_UDMATIM(0), 0); 3900 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3901 CMD_UDMATIM(1), 0); 3902 } 3903 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3904 break; 3905 default: 3906 sc->sc_wdcdev.irqack = pciide_irqack; 3907 } 3908 } 3909 3910 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3911 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3912 sc->sc_wdcdev.PIO_cap = 4; 3913 sc->sc_wdcdev.DMA_cap = 2; 3914 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 3915 3916 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3917 3918 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 3919 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3920 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3921 DEBUG_PROBE); 3922 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3923 cp = &sc->pciide_channels[channel]; 3924 cmd_channel_map(pa, sc, channel); 3925 if (cp->hw_ok == 0) 3926 continue; 3927 cmd0643_9_setup_channel(&cp->wdc_channel); 3928 } 3929 /* 3930 * note - this also makes sure we clear the irq disable and reset 3931 * bits 3932 */ 3933 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 3934 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 3935 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3936 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3937 DEBUG_PROBE); 3938 } 3939 3940 void 3941 cmd0643_9_setup_channel(struct channel_softc *chp) 3942 { 3943 struct ata_drive_datas *drvp; 3944 u_int8_t tim; 3945 u_int32_t idedma_ctl, udma_reg; 3946 int drive; 3947 struct pciide_channel *cp = (struct pciide_channel *)chp; 3948 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3949 3950 idedma_ctl = 0; 3951 /* setup DMA if needed */ 3952 pciide_channel_dma_setup(cp); 3953 3954 for (drive = 0; drive < 2; drive++) { 3955 drvp = &chp->ch_drive[drive]; 3956 /* If no drive, skip */ 3957 if ((drvp->drive_flags & DRIVE) == 0) 3958 continue; 3959 /* add timing values, setup DMA if needed */ 3960 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 3961 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 3962 if (drvp->drive_flags & DRIVE_UDMA) { 3963 /* UltraDMA on a 646U2, 0648 or 0649 */ 3964 drvp->drive_flags &= ~DRIVE_DMA; 3965 udma_reg = pciide_pci_read(sc->sc_pc, 3966 sc->sc_tag, CMD_UDMATIM(chp->channel)); 3967 if (drvp->UDMA_mode > 2 && 3968 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3969 CMD_BICSR) & 3970 CMD_BICSR_80(chp->channel)) == 0) { 3971 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 3972 "80-wire cable not detected\n", 3973 drvp->drive_name, 3974 sc->sc_wdcdev.sc_dev.dv_xname, 3975 chp->channel, drive), DEBUG_PROBE); 3976 drvp->UDMA_mode = 2; 3977 } 3978 if (drvp->UDMA_mode > 2) 3979 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 3980 else if (sc->sc_wdcdev.UDMA_cap > 2) 3981 udma_reg |= CMD_UDMATIM_UDMA33(drive); 3982 udma_reg |= CMD_UDMATIM_UDMA(drive); 3983 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 3984 CMD_UDMATIM_TIM_OFF(drive)); 3985 udma_reg |= 3986 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 3987 CMD_UDMATIM_TIM_OFF(drive)); 3988 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3989 CMD_UDMATIM(chp->channel), udma_reg); 3990 } else { 3991 /* 3992 * use Multiword DMA. 
3993 * Timings will be used for both PIO and DMA, 3994 * so adjust DMA mode if needed 3995 * if we have a 0646U2/8/9, turn off UDMA 3996 */ 3997 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 3998 udma_reg = pciide_pci_read(sc->sc_pc, 3999 sc->sc_tag, 4000 CMD_UDMATIM(chp->channel)); 4001 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 4002 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4003 CMD_UDMATIM(chp->channel), 4004 udma_reg); 4005 } 4006 if (drvp->PIO_mode >= 3 && 4007 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 4008 drvp->DMA_mode = drvp->PIO_mode - 2; 4009 } 4010 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 4011 } 4012 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4013 } 4014 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4015 CMD_DATA_TIM(chp->channel, drive), tim); 4016 } 4017 if (idedma_ctl != 0) { 4018 /* Add software bits in status register */ 4019 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4020 IDEDMA_CTL(chp->channel), 4021 idedma_ctl); 4022 } 4023 pciide_print_modes(cp); 4024 #ifdef __sparc64__ 4025 /* 4026 * The Ultra 5 has a tendency to hang during reboot. This is due 4027 * to the PCI0646U asserting a PCI interrupt line when the chip 4028 * registers claim that it is not. Performing a reset at this 4029 * point appears to eliminate the symptoms. It is likely the 4030 * real cause is still lurking somewhere in the code. 4031 */ 4032 wdcreset(chp, SILENT); 4033 #endif /* __sparc64__ */ 4034 } 4035 4036 void 4037 cmd646_9_irqack(struct channel_softc *chp) 4038 { 4039 u_int32_t priirq, secirq; 4040 struct pciide_channel *cp = (struct pciide_channel *)chp; 4041 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4042 4043 if (chp->channel == 0) { 4044 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 4045 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 4046 } else { 4047 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 4048 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 4049 } 4050 pciide_irqack(chp); 4051 } 4052 4053 void 4054 cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4055 { 4056 struct pciide_channel *cp; 4057 int channel; 4058 4059 printf("\n%s: bus-master DMA support present", 4060 sc->sc_wdcdev.sc_dev.dv_xname); 4061 pciide_mapreg_dma(sc, pa); 4062 printf("\n"); 4063 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4064 WDC_CAPABILITY_MODE; 4065 if (sc->sc_dma_ok) { 4066 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4067 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 4068 sc->sc_wdcdev.UDMA_cap = 6; 4069 sc->sc_wdcdev.irqack = pciide_irqack; 4070 } 4071 4072 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4073 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4074 sc->sc_wdcdev.PIO_cap = 4; 4075 sc->sc_wdcdev.DMA_cap = 2; 4076 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 4077 4078 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 4079 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 4080 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 4081 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 4082 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4083 cp = &sc->pciide_channels[channel]; 4084 cmd680_channel_map(pa, sc, channel); 4085 if (cp->hw_ok == 0) 4086 continue; 4087 cmd680_setup_channel(&cp->wdc_channel); 4088 } 4089 } 4090 4091 void 4092 cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 4093 int channel) 4094 { 4095 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4096 bus_size_t cmdsize, ctlsize; 4097 int 
interface, i, reg; 4098 static const u_int8_t init_val[] = 4099 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 4100 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 4101 4102 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 4103 interface = PCIIDE_INTERFACE_SETTABLE(0) | 4104 PCIIDE_INTERFACE_SETTABLE(1); 4105 interface |= PCIIDE_INTERFACE_PCI(0) | 4106 PCIIDE_INTERFACE_PCI(1); 4107 } else { 4108 interface = PCI_INTERFACE(pa->pa_class); 4109 } 4110 4111 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4112 cp->name = PCIIDE_CHANNEL_NAME(channel); 4113 cp->wdc_channel.channel = channel; 4114 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4115 4116 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4117 if (cp->wdc_channel.ch_queue == NULL) { 4118 printf("%s %s: " 4119 "cannot allocate channel queue", 4120 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4121 return; 4122 } 4123 4124 /* XXX */ 4125 reg = 0xa2 + channel * 16; 4126 for (i = 0; i < sizeof(init_val); i++) 4127 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 4128 4129 printf("%s: %s %s to %s mode\n", 4130 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 4131 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 4132 "configured" : "wired", 4133 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 4134 "native-PCI" : "compatibility"); 4135 4136 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 4137 if (cp->hw_ok == 0) 4138 return; 4139 pciide_map_compat_intr(pa, cp, channel, interface); 4140 } 4141 4142 void 4143 cmd680_setup_channel(struct channel_softc *chp) 4144 { 4145 struct ata_drive_datas *drvp; 4146 u_int8_t mode, off, scsc; 4147 u_int16_t val; 4148 u_int32_t idedma_ctl; 4149 int drive; 4150 struct pciide_channel *cp = (struct pciide_channel *)chp; 4151 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4152 pci_chipset_tag_t pc = sc->sc_pc; 4153 pcitag_t pa = sc->sc_tag; 4154 static const u_int8_t udma2_tbl[] = 4155 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 4156 static const u_int8_t udma_tbl[] = 4157 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 4158 static const u_int16_t dma_tbl[] = 4159 { 0x2208, 0x10c2, 0x10c1 }; 4160 static const u_int16_t pio_tbl[] = 4161 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 4162 4163 idedma_ctl = 0; 4164 pciide_channel_dma_setup(cp); 4165 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 4166 4167 for (drive = 0; drive < 2; drive++) { 4168 drvp = &chp->ch_drive[drive]; 4169 /* If no drive, skip */ 4170 if ((drvp->drive_flags & DRIVE) == 0) 4171 continue; 4172 mode &= ~(0x03 << (drive * 4)); 4173 if (drvp->drive_flags & DRIVE_UDMA) { 4174 drvp->drive_flags &= ~DRIVE_DMA; 4175 off = 0xa0 + chp->channel * 16; 4176 if (drvp->UDMA_mode > 2 && 4177 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 4178 drvp->UDMA_mode = 2; 4179 scsc = pciide_pci_read(pc, pa, 0x8a); 4180 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 4181 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 4182 scsc = pciide_pci_read(pc, pa, 0x8a); 4183 if ((scsc & 0x30) == 0) 4184 drvp->UDMA_mode = 5; 4185 } 4186 mode |= 0x03 << (drive * 4); 4187 off = 0xac + chp->channel * 16 + drive * 2; 4188 val = pciide_pci_read(pc, pa, off) & ~0x3f; 4189 if (scsc & 0x30) 4190 val |= udma2_tbl[drvp->UDMA_mode]; 4191 else 4192 val |= udma_tbl[drvp->UDMA_mode]; 4193 pciide_pci_write(pc, pa, off, val); 4194 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4195 } else if (drvp->drive_flags & DRIVE_DMA) { 4196 mode |= 0x02 << (drive * 4); 4197 off = 0xa8 + chp->channel * 16 + drive * 2; 4198 val = dma_tbl[drvp->DMA_mode]; 4199 
pciide_pci_write(pc, pa, off, val & 0xff); 4200 pciide_pci_write(pc, pa, off, val >> 8); 4201 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4202 } else { 4203 mode |= 0x01 << (drive * 4); 4204 off = 0xa4 + chp->channel * 16 + drive * 2; 4205 val = pio_tbl[drvp->PIO_mode]; 4206 pciide_pci_write(pc, pa, off, val & 0xff); 4207 pciide_pci_write(pc, pa, off, val >> 8); 4208 } 4209 } 4210 4211 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode); 4212 if (idedma_ctl != 0) { 4213 /* Add software bits in status register */ 4214 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4215 IDEDMA_CTL(chp->channel), 4216 idedma_ctl); 4217 } 4218 pciide_print_modes(cp); 4219 } 4220 4221 /* 4222 * When the Silicon Image 3112 retries a PCI memory read command, 4223 * it may retry it as a memory read multiple command under some 4224 * circumstances. This can totally confuse some PCI controllers, 4225 * so ensure that it will never do this by making sure that the 4226 * Read Threshold (FIFO Read Request Control) field of the FIFO 4227 * Valid Byte Count and Control registers for both channels (BA5 4228 * offset 0x40 and 0x44) are set to be at least as large as the 4229 * cacheline size register. 4230 */ 4231 void 4232 sii_fixup_cacheline(struct pciide_softc *sc, struct pci_attach_args *pa) 4233 { 4234 pcireg_t cls, reg40, reg44; 4235 4236 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 4237 cls = (cls >> PCI_CACHELINE_SHIFT) & PCI_CACHELINE_MASK; 4238 cls *= 4; 4239 if (cls > 224) { 4240 cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 4241 cls &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT); 4242 cls |= ((224/4) << PCI_CACHELINE_SHIFT); 4243 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, cls); 4244 cls = 224; 4245 } 4246 if (cls < 32) 4247 cls = 32; 4248 cls = (cls + 31) / 32; 4249 reg40 = ba5_read_4(sc, 0x40); 4250 reg44 = ba5_read_4(sc, 0x44); 4251 if ((reg40 & 0x7) < cls) 4252 ba5_write_4(sc, 0x40, (reg40 & ~0x07) | cls); 4253 if ((reg44 & 0x7) < cls) 4254 ba5_write_4(sc, 0x44, (reg44 & ~0x07) | cls); 4255 } 4256 4257 void 4258 sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4259 { 4260 struct pciide_channel *cp; 4261 bus_size_t cmdsize, ctlsize; 4262 pcireg_t interface, scs_cmd, cfgctl; 4263 int channel; 4264 struct pciide_satalink *sl; 4265 4266 /* Allocate memory for private data */ 4267 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 4268 sl = sc->sc_cookie; 4269 4270 sc->chip_unmap = default_chip_unmap; 4271 4272 #define SII3112_RESET_BITS \ 4273 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4274 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4275 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET) 4276 4277 /* 4278 * Reset everything and then unblock all of the interrupts. 
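* The sequence below asserts SII3112_RESET_BITS, waits roughly 50ms,
* then rewrites SCS_CMD with only SCS_CMD_BA5_EN preserved (dropping
* the reset bits) and waits another 50ms before touching the chip.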
4279 */ 4280 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4281 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4282 scs_cmd | SII3112_RESET_BITS); 4283 delay(50 * 1000); 4284 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4285 scs_cmd & SCS_CMD_BA5_EN); 4286 delay(50 * 1000); 4287 4288 if (scs_cmd & SCS_CMD_BA5_EN) { 4289 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4290 PCI_MAPREG_TYPE_MEM | 4291 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4292 &sl->ba5_st, &sl->ba5_sh, 4293 NULL, NULL, 0) != 0) 4294 printf(": unable to map BA5 register space\n"); 4295 else 4296 sl->ba5_en = 1; 4297 } else { 4298 cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 4299 SII3112_PCI_CFGCTL); 4300 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL, 4301 cfgctl | CFGCTL_BA5INDEN); 4302 } 4303 4304 printf(": DMA"); 4305 pciide_mapreg_dma(sc, pa); 4306 printf("\n"); 4307 4308 /* 4309 * Rev. <= 0x01 of the 3112 have a bug that can cause data 4310 * corruption if DMA transfers cross an 8K boundary. This is 4311 * apparently hard to tickle, but we'll go ahead and play it 4312 * safe. 4313 */ 4314 if (sc->sc_rev <= 0x01) { 4315 sc->sc_dma_maxsegsz = 8192; 4316 sc->sc_dma_boundary = 8192; 4317 } 4318 4319 sii_fixup_cacheline(sc, pa); 4320 4321 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4322 sc->sc_wdcdev.PIO_cap = 4; 4323 if (sc->sc_dma_ok) { 4324 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4325 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4326 sc->sc_wdcdev.irqack = pciide_irqack; 4327 sc->sc_wdcdev.DMA_cap = 2; 4328 sc->sc_wdcdev.UDMA_cap = 6; 4329 } 4330 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4331 4332 /* We can use SControl and SStatus to probe for drives. */ 4333 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4334 4335 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4336 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4337 4338 /* 4339 * The 3112 either identifies itself as a RAID storage device 4340 * or a Misc storage device. Fake up the interface bits for 4341 * what our driver expects. 
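* When the part claims to be a RAID or Misc storage device, the
* programming-interface byte does not carry its usual IDE meaning,
* so both channels are simply treated as native-PCI with bus-master
* DMA, as done below.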
4342 */ 4343 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4344 interface = PCI_INTERFACE(pa->pa_class); 4345 } else { 4346 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4347 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4348 } 4349 4350 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4351 cp = &sc->pciide_channels[channel]; 4352 if (pciide_chansetup(sc, channel, interface) == 0) 4353 continue; 4354 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4355 pciide_pci_intr); 4356 if (cp->hw_ok == 0) 4357 continue; 4358 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4359 } 4360 } 4361 4362 void 4363 sii3112_setup_channel(struct channel_softc *chp) 4364 { 4365 struct ata_drive_datas *drvp; 4366 int drive; 4367 u_int32_t idedma_ctl, dtm; 4368 struct pciide_channel *cp = (struct pciide_channel *)chp; 4369 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4370 4371 /* setup DMA if needed */ 4372 pciide_channel_dma_setup(cp); 4373 4374 idedma_ctl = 0; 4375 dtm = 0; 4376 4377 for (drive = 0; drive < 2; drive++) { 4378 drvp = &chp->ch_drive[drive]; 4379 /* If no drive, skip */ 4380 if ((drvp->drive_flags & DRIVE) == 0) 4381 continue; 4382 if (drvp->drive_flags & DRIVE_UDMA) { 4383 /* use Ultra/DMA */ 4384 drvp->drive_flags &= ~DRIVE_DMA; 4385 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4386 dtm |= DTM_IDEx_DMA; 4387 } else if (drvp->drive_flags & DRIVE_DMA) { 4388 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4389 dtm |= DTM_IDEx_DMA; 4390 } else { 4391 dtm |= DTM_IDEx_PIO; 4392 } 4393 } 4394 4395 /* 4396 * Nothing to do to setup modes; it is meaningless in S-ATA 4397 * (but many S-ATA drives still want to get the SET_FEATURE 4398 * command). 4399 */ 4400 if (idedma_ctl != 0) { 4401 /* Add software bits in status register */ 4402 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 4403 } 4404 BA5_WRITE_4(sc, chp->channel, ba5_IDE_DTM, dtm); 4405 pciide_print_modes(cp); 4406 } 4407 4408 void 4409 sii3112_drv_probe(struct channel_softc *chp) 4410 { 4411 struct pciide_channel *cp = (struct pciide_channel *)chp; 4412 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4413 uint32_t scontrol, sstatus; 4414 uint8_t scnt, sn, cl, ch; 4415 int s; 4416 4417 /* 4418 * The 3112 is a 2-port part, and only has one drive per channel 4419 * (each port emulates a master drive). 4420 * 4421 * The 3114 is similar, but has 4 channels. 4422 */ 4423 4424 /* 4425 * Request communication initialization sequence, any speed. 4426 * Performing this is the equivalent of an ATA Reset. 4427 */ 4428 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 4429 4430 /* 4431 * XXX We don't yet support SATA power management; disable all 4432 * power management state transitions. 4433 */ 4434 scontrol |= SControl_IPM_NONE; 4435 4436 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4437 delay(50 * 1000); 4438 scontrol &= ~SControl_DET_INIT; 4439 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4440 delay(50 * 1000); 4441 4442 sstatus = BA5_READ_4(sc, chp->channel, ba5_SStatus); 4443 #if 0 4444 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 4445 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 4446 BA5_READ_4(sc, chp->channel, ba5_SControl)); 4447 #endif 4448 switch (sstatus & SStatus_DET_mask) { 4449 case SStatus_DET_NODEV: 4450 /* No device; be silent. 
*/ 4451 break; 4452 4453 case SStatus_DET_DEV_NE: 4454 printf("%s: port %d: device connected, but " 4455 "communication not established\n", 4456 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4457 break; 4458 4459 case SStatus_DET_OFFLINE: 4460 printf("%s: port %d: PHY offline\n", 4461 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4462 break; 4463 4464 case SStatus_DET_DEV: 4465 /* 4466 * XXX ATAPI detection doesn't currently work. Don't 4467 * XXX know why. But, it's not like the standard method 4468 * XXX can detect an ATAPI device connected via a SATA/PATA 4469 * XXX bridge, so at least this is no worse. --thorpej 4470 */ 4471 if (chp->_vtbl != NULL) 4472 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 4473 else 4474 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 4475 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 4476 delay(10); /* 400ns delay */ 4477 /* Save register contents. */ 4478 if (chp->_vtbl != NULL) { 4479 scnt = CHP_READ_REG(chp, wdr_seccnt); 4480 sn = CHP_READ_REG(chp, wdr_sector); 4481 cl = CHP_READ_REG(chp, wdr_cyl_lo); 4482 ch = CHP_READ_REG(chp, wdr_cyl_hi); 4483 } else { 4484 scnt = bus_space_read_1(chp->cmd_iot, 4485 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 4486 sn = bus_space_read_1(chp->cmd_iot, 4487 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 4488 cl = bus_space_read_1(chp->cmd_iot, 4489 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 4490 ch = bus_space_read_1(chp->cmd_iot, 4491 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 4492 } 4493 #if 0 4494 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 4495 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 4496 scnt, sn, cl, ch); 4497 #endif 4498 /* 4499 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 4500 * cases we get wrong values here, so ignore it. 4501 */ 4502 s = splbio(); 4503 if (cl == 0x14 && ch == 0xeb) 4504 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 4505 else 4506 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 4507 splx(s); 4508 4509 printf("%s: port %d: device present", 4510 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4511 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 4512 case 1: 4513 printf(", speed: 1.5Gb/s"); 4514 break; 4515 case 2: 4516 printf(", speed: 3.0Gb/s"); 4517 break; 4518 } 4519 printf("\n"); 4520 break; 4521 4522 default: 4523 printf("%s: port %d: unknown SStatus: 0x%08x\n", 4524 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 4525 } 4526 } 4527 4528 void 4529 sii3114_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4530 { 4531 struct pciide_channel *cp; 4532 pcireg_t scs_cmd; 4533 pci_intr_handle_t intrhandle; 4534 const char *intrstr; 4535 int channel; 4536 struct pciide_satalink *sl; 4537 4538 /* Allocate memory for private data */ 4539 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 4540 sl = sc->sc_cookie; 4541 4542 #define SII3114_RESET_BITS \ 4543 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4544 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4545 SCS_CMD_FF3_RESET | SCS_CMD_FF2_RESET | \ 4546 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET | \ 4547 SCS_CMD_IDE3_RESET | SCS_CMD_IDE2_RESET) 4548 4549 /* 4550 * Reset everything and then unblock all of the interrupts. 4551 */ 4552 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4553 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4554 scs_cmd | SII3114_RESET_BITS); 4555 delay(50 * 1000); 4556 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4557 scs_cmd & SCS_CMD_M66EN); 4558 delay(50 * 1000); 4559 4560 /* 4561 * On the 3114, the BA5 register space is always enabled. 
In 4562 * order to use the 3114 in any sane way, we must use this BA5 4563 * register space, and so we consider it an error if we cannot 4564 * map it. 4565 * 4566 * As a consequence of using BA5, our register mapping is different 4567 * from a normal PCI IDE controller's, and so we are unable to use 4568 * most of the common PCI IDE register mapping functions. 4569 */ 4570 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4571 PCI_MAPREG_TYPE_MEM | 4572 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4573 &sl->ba5_st, &sl->ba5_sh, 4574 NULL, NULL, 0) != 0) { 4575 printf(": unable to map BA5 register space\n"); 4576 return; 4577 } 4578 sl->ba5_en = 1; 4579 4580 /* 4581 * Set the Interrupt Steering bit in the IDEDMA_CMD register of 4582 * channel 2. This is required at all times for proper operation 4583 * when using the BA5 register space (otherwise interrupts from 4584 * all 4 channels won't work). 4585 */ 4586 BA5_WRITE_4(sc, 2, ba5_IDEDMA_CMD, IDEDMA_CMD_INT_STEER); 4587 4588 printf(": DMA"); 4589 sii3114_mapreg_dma(sc, pa); 4590 printf("\n"); 4591 4592 sii_fixup_cacheline(sc, pa); 4593 4594 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4595 sc->sc_wdcdev.PIO_cap = 4; 4596 if (sc->sc_dma_ok) { 4597 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4598 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4599 sc->sc_wdcdev.irqack = pciide_irqack; 4600 sc->sc_wdcdev.DMA_cap = 2; 4601 sc->sc_wdcdev.UDMA_cap = 6; 4602 } 4603 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4604 4605 /* We can use SControl and SStatus to probe for drives. */ 4606 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4607 4608 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4609 sc->sc_wdcdev.nchannels = 4; 4610 4611 /* Map and establish the interrupt handler. */ 4612 if (pci_intr_map(pa, &intrhandle) != 0) { 4613 printf("%s: couldn't map native-PCI interrupt\n", 4614 sc->sc_wdcdev.sc_dev.dv_xname); 4615 return; 4616 } 4617 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 4618 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 4619 /* XXX */ 4620 pciide_pci_intr, sc, 4621 sc->sc_wdcdev.sc_dev.dv_xname); 4622 if (sc->sc_pci_ih != NULL) { 4623 printf("%s: using %s for native-PCI interrupt\n", 4624 sc->sc_wdcdev.sc_dev.dv_xname, 4625 intrstr ? intrstr : "unknown interrupt"); 4626 } else { 4627 printf("%s: couldn't establish native-PCI interrupt", 4628 sc->sc_wdcdev.sc_dev.dv_xname); 4629 if (intrstr != NULL) 4630 printf(" at %s", intrstr); 4631 printf("\n"); 4632 return; 4633 } 4634 4635 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4636 cp = &sc->pciide_channels[channel]; 4637 if (sii3114_chansetup(sc, channel) == 0) 4638 continue; 4639 sii3114_mapchan(cp); 4640 if (cp->hw_ok == 0) 4641 continue; 4642 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4643 } 4644 } 4645 4646 void 4647 sii3114_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 4648 { 4649 int chan, reg; 4650 bus_size_t size; 4651 struct pciide_satalink *sl = sc->sc_cookie; 4652 4653 sc->sc_wdcdev.dma_arg = sc; 4654 sc->sc_wdcdev.dma_init = pciide_dma_init; 4655 sc->sc_wdcdev.dma_start = pciide_dma_start; 4656 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 4657 4658 /* 4659 * Slice off a subregion of BA5 for each of the channel's DMA 4660 * registers. 
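* Each of the four channels gets one bus_space handle per DMA
* register, carved out of the single BA5 mapping; the subregion size
* is clamped so no handle extends past that channel's register
* window (IDEDMA_SCH_OFFSET).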
4661 */ 4662 4663 sc->sc_dma_iot = sl->ba5_st; 4664 for (chan = 0; chan < 4; chan++) { 4665 for (reg = 0; reg < IDEDMA_NREGS; reg++) { 4666 size = 4; 4667 if (size > (IDEDMA_SCH_OFFSET - reg)) 4668 size = IDEDMA_SCH_OFFSET - reg; 4669 if (bus_space_subregion(sl->ba5_st, 4670 sl->ba5_sh, 4671 satalink_ba5_regmap[chan].ba5_IDEDMA_CMD + reg, 4672 size, &sl->regs[chan].dma_iohs[reg]) != 0) { 4673 sc->sc_dma_ok = 0; 4674 printf(": can't subregion offset " 4675 "%lu size %lu", 4676 (u_long) satalink_ba5_regmap[ 4677 chan].ba5_IDEDMA_CMD + reg, 4678 (u_long) size); 4679 return; 4680 } 4681 } 4682 } 4683 4684 sc->sc_dmacmd_read = sii3114_dmacmd_read; 4685 sc->sc_dmacmd_write = sii3114_dmacmd_write; 4686 sc->sc_dmactl_read = sii3114_dmactl_read; 4687 sc->sc_dmactl_write = sii3114_dmactl_write; 4688 sc->sc_dmatbl_write = sii3114_dmatbl_write; 4689 4690 /* DMA registers all set up! */ 4691 sc->sc_dmat = pa->pa_dmat; 4692 sc->sc_dma_ok = 1; 4693 } 4694 4695 int 4696 sii3114_chansetup(struct pciide_softc *sc, int channel) 4697 { 4698 static const char *channel_names[] = { 4699 "port 0", 4700 "port 1", 4701 "port 2", 4702 "port 3", 4703 }; 4704 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4705 4706 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4707 4708 /* 4709 * We must always keep the Interrupt Steering bit set in channel 2's 4710 * IDEDMA_CMD register. 4711 */ 4712 if (channel == 2) 4713 cp->idedma_cmd = IDEDMA_CMD_INT_STEER; 4714 4715 cp->name = channel_names[channel]; 4716 cp->wdc_channel.channel = channel; 4717 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4718 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4719 if (cp->wdc_channel.ch_queue == NULL) { 4720 printf("%s %s channel: " 4721 "cannot allocate channel queue", 4722 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4723 return (0); 4724 } 4725 return (1); 4726 } 4727 4728 void 4729 sii3114_mapchan(struct pciide_channel *cp) 4730 { 4731 struct channel_softc *wdc_cp = &cp->wdc_channel; 4732 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4733 struct pciide_satalink *sl = sc->sc_cookie; 4734 int chan = wdc_cp->channel; 4735 int i; 4736 4737 cp->hw_ok = 0; 4738 cp->compat = 0; 4739 cp->ih = sc->sc_pci_ih; 4740 4741 sl->regs[chan].cmd_iot = sl->ba5_st; 4742 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4743 satalink_ba5_regmap[chan].ba5_IDE_TF0, 4744 9, &sl->regs[chan].cmd_baseioh) != 0) { 4745 printf("%s: couldn't subregion %s cmd base\n", 4746 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4747 return; 4748 } 4749 4750 sl->regs[chan].ctl_iot = sl->ba5_st; 4751 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4752 satalink_ba5_regmap[chan].ba5_IDE_TF8, 4753 1, &cp->ctl_baseioh) != 0) { 4754 printf("%s: couldn't subregion %s ctl base\n", 4755 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4756 return; 4757 } 4758 sl->regs[chan].ctl_ioh = cp->ctl_baseioh; 4759 4760 for (i = 0; i < WDC_NREG; i++) { 4761 if (bus_space_subregion(sl->regs[chan].cmd_iot, 4762 sl->regs[chan].cmd_baseioh, 4763 i, i == 0 ? 
4 : 1, 4764 &sl->regs[chan].cmd_iohs[i]) != 0) { 4765 printf("%s: couldn't subregion %s channel " 4766 "cmd regs\n", 4767 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4768 return; 4769 } 4770 } 4771 sl->regs[chan].cmd_iohs[wdr_status & _WDC_REGMASK] = 4772 sl->regs[chan].cmd_iohs[wdr_command & _WDC_REGMASK]; 4773 sl->regs[chan].cmd_iohs[wdr_features & _WDC_REGMASK] = 4774 sl->regs[chan].cmd_iohs[wdr_error & _WDC_REGMASK]; 4775 wdc_cp->data32iot = wdc_cp->cmd_iot = sl->regs[chan].cmd_iot; 4776 wdc_cp->data32ioh = wdc_cp->cmd_ioh = sl->regs[chan].cmd_iohs[0]; 4777 wdc_cp->_vtbl = &wdc_sii3114_vtbl; 4778 wdcattach(wdc_cp); 4779 cp->hw_ok = 1; 4780 } 4781 4782 u_int8_t 4783 sii3114_read_reg(struct channel_softc *chp, enum wdc_regs reg) 4784 { 4785 struct pciide_channel *cp = (struct pciide_channel *)chp; 4786 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4787 struct pciide_satalink *sl = sc->sc_cookie; 4788 4789 if (reg & _WDC_AUX) 4790 return (bus_space_read_1(sl->regs[chp->channel].ctl_iot, 4791 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 4792 else 4793 return (bus_space_read_1(sl->regs[chp->channel].cmd_iot, 4794 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0)); 4795 } 4796 4797 void 4798 sii3114_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 4799 { 4800 struct pciide_channel *cp = (struct pciide_channel *)chp; 4801 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4802 struct pciide_satalink *sl = sc->sc_cookie; 4803 4804 if (reg & _WDC_AUX) 4805 bus_space_write_1(sl->regs[chp->channel].ctl_iot, 4806 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 4807 else 4808 bus_space_write_1(sl->regs[chp->channel].cmd_iot, 4809 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 4810 0, val); 4811 } 4812 4813 u_int8_t 4814 sii3114_dmacmd_read(struct pciide_softc *sc, int chan) 4815 { 4816 struct pciide_satalink *sl = sc->sc_cookie; 4817 4818 return (bus_space_read_1(sc->sc_dma_iot, 4819 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0)); 4820 } 4821 4822 void 4823 sii3114_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 4824 { 4825 struct pciide_satalink *sl = sc->sc_cookie; 4826 4827 bus_space_write_1(sc->sc_dma_iot, 4828 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, val); 4829 } 4830 4831 u_int8_t 4832 sii3114_dmactl_read(struct pciide_softc *sc, int chan) 4833 { 4834 struct pciide_satalink *sl = sc->sc_cookie; 4835 4836 return (bus_space_read_1(sc->sc_dma_iot, 4837 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0)); 4838 } 4839 4840 void 4841 sii3114_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 4842 { 4843 struct pciide_satalink *sl = sc->sc_cookie; 4844 4845 bus_space_write_1(sc->sc_dma_iot, 4846 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0, val); 4847 } 4848 4849 void 4850 sii3114_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 4851 { 4852 struct pciide_satalink *sl = sc->sc_cookie; 4853 4854 bus_space_write_4(sc->sc_dma_iot, 4855 sl->regs[chan].dma_iohs[IDEDMA_TBL(0)], 0, val); 4856 } 4857 4858 void 4859 cy693_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4860 { 4861 struct pciide_channel *cp; 4862 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4863 bus_size_t cmdsize, ctlsize; 4864 struct pciide_cy *cy; 4865 4866 /* Allocate memory for private data */ 4867 sc->sc_cookie = malloc(sizeof(*cy), M_DEVBUF, M_NOWAIT | M_ZERO); 4868 cy = sc->sc_cookie; 4869 4870 /* 4871 * this chip has 2 PCI IDE functions, one for primary and one for 4872 * secondary. 
So we need to call pciide_mapregs_compat() with 4873 * the real channel 4874 */ 4875 if (pa->pa_function == 1) { 4876 cy->cy_compatchan = 0; 4877 } else if (pa->pa_function == 2) { 4878 cy->cy_compatchan = 1; 4879 } else { 4880 printf(": unexpected PCI function %d\n", pa->pa_function); 4881 return; 4882 } 4883 4884 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 4885 printf(": DMA"); 4886 pciide_mapreg_dma(sc, pa); 4887 } else { 4888 printf(": no DMA"); 4889 sc->sc_dma_ok = 0; 4890 } 4891 4892 cy->cy_handle = cy82c693_init(pa->pa_iot); 4893 if (cy->cy_handle == NULL) { 4894 printf(", (unable to map ctl registers)"); 4895 sc->sc_dma_ok = 0; 4896 } 4897 4898 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4899 WDC_CAPABILITY_MODE; 4900 if (sc->sc_dma_ok) { 4901 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4902 sc->sc_wdcdev.irqack = pciide_irqack; 4903 } 4904 sc->sc_wdcdev.PIO_cap = 4; 4905 sc->sc_wdcdev.DMA_cap = 2; 4906 sc->sc_wdcdev.set_modes = cy693_setup_channel; 4907 4908 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4909 sc->sc_wdcdev.nchannels = 1; 4910 4911 /* Only one channel for this chip; if we are here it's enabled */ 4912 cp = &sc->pciide_channels[0]; 4913 sc->wdc_chanarray[0] = &cp->wdc_channel; 4914 cp->name = PCIIDE_CHANNEL_NAME(0); 4915 cp->wdc_channel.channel = 0; 4916 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4917 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4918 if (cp->wdc_channel.ch_queue == NULL) { 4919 printf(": cannot allocate channel queue\n"); 4920 return; 4921 } 4922 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 4923 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 4924 "configured" : "wired"); 4925 if (interface & PCIIDE_INTERFACE_PCI(0)) { 4926 printf("native-PCI\n"); 4927 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 4928 pciide_pci_intr); 4929 } else { 4930 printf("compatibility\n"); 4931 cp->hw_ok = pciide_mapregs_compat(pa, cp, cy->cy_compatchan, 4932 &cmdsize, &ctlsize); 4933 } 4934 4935 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4936 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4937 pciide_map_compat_intr(pa, cp, cy->cy_compatchan, interface); 4938 if (cp->hw_ok == 0) 4939 return; 4940 wdcattach(&cp->wdc_channel); 4941 if (pciide_chan_candisable(cp)) { 4942 pci_conf_write(sc->sc_pc, sc->sc_tag, 4943 PCI_COMMAND_STATUS_REG, 0); 4944 } 4945 if (cp->hw_ok == 0) { 4946 pciide_unmap_compat_intr(pa, cp, cy->cy_compatchan, 4947 interface); 4948 return; 4949 } 4950 4951 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 4952 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4953 cy693_setup_channel(&cp->wdc_channel); 4954 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 4955 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4956 } 4957 4958 void 4959 cy693_setup_channel(struct channel_softc *chp) 4960 { 4961 struct ata_drive_datas *drvp; 4962 int drive; 4963 u_int32_t cy_cmd_ctrl; 4964 u_int32_t idedma_ctl; 4965 struct pciide_channel *cp = (struct pciide_channel *)chp; 4966 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4967 int dma_mode = -1; 4968 struct pciide_cy *cy = sc->sc_cookie; 4969 4970 cy_cmd_ctrl = idedma_ctl = 0; 4971 4972 /* setup DMA if needed */ 4973 pciide_channel_dma_setup(cp); 4974 4975 for (drive = 0; drive < 2; drive++) { 4976 drvp = &chp->ch_drive[drive]; 4977 /* If no drive, skip */ 4978 if ((drvp->drive_flags & DRIVE) == 0) 4979 continue; 4980 /* add timing values, setup DMA if needed */ 4981 if 
(drvp->drive_flags & DRIVE_DMA) { 4982 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4983 /* use Multiword DMA */ 4984 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 4985 dma_mode = drvp->DMA_mode; 4986 } 4987 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4988 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 4989 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4990 CY_CMD_CTRL_IOW_REC_OFF(drive)); 4991 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4992 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 4993 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4994 CY_CMD_CTRL_IOR_REC_OFF(drive)); 4995 } 4996 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 4997 chp->ch_drive[0].DMA_mode = dma_mode; 4998 chp->ch_drive[1].DMA_mode = dma_mode; 4999 5000 if (dma_mode == -1) 5001 dma_mode = 0; 5002 5003 if (cy->cy_handle != NULL) { 5004 /* Note: `multiple' is implied. */ 5005 cy82c693_write(cy->cy_handle, 5006 (cy->cy_compatchan == 0) ? 5007 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 5008 } 5009 5010 pciide_print_modes(cp); 5011 5012 if (idedma_ctl != 0) { 5013 /* Add software bits in status register */ 5014 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5015 IDEDMA_CTL(chp->channel), idedma_ctl); 5016 } 5017 } 5018 5019 static struct sis_hostbr_type { 5020 u_int16_t id; 5021 u_int8_t rev; 5022 u_int8_t udma_mode; 5023 char *name; 5024 u_int8_t type; 5025 #define SIS_TYPE_NOUDMA 0 5026 #define SIS_TYPE_66 1 5027 #define SIS_TYPE_100OLD 2 5028 #define SIS_TYPE_100NEW 3 5029 #define SIS_TYPE_133OLD 4 5030 #define SIS_TYPE_133NEW 5 5031 #define SIS_TYPE_SOUTH 6 5032 } sis_hostbr_type[] = { 5033 /* Most infos here are from sos@freebsd.org */ 5034 {PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66}, 5035 #if 0 5036 /* 5037 * controllers associated to a rev 0x2 530 Host to PCI Bridge 5038 * have problems with UDMA (info provided by Christos) 5039 */ 5040 {PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA}, 5041 #endif 5042 {PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66}, 5043 {PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66}, 5044 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66}, 5045 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66}, 5046 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW}, 5047 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW}, 5048 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW}, 5049 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH}, 5050 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH}, 5051 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH}, 5052 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH}, 5053 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH}, 5054 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH}, 5055 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH}, 5056 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH}, 5057 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH}, 5058 {PCI_PRODUCT_SIS_661, 0x00, 6, "661", SIS_TYPE_SOUTH}, 5059 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD}, 5060 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW}, 5061 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW}, 5062 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH}, 5063 {PCI_PRODUCT_SIS_741, 0x00, 6, "741", SIS_TYPE_SOUTH}, 5064 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW}, 5065 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH}, 5066 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH}, 5067 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH}, 5068 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", 
SIS_TYPE_SOUTH}, 5069 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH}, 5070 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH}, 5071 {PCI_PRODUCT_SIS_760, 0x00, 6, "760", SIS_TYPE_SOUTH}, 5072 /* 5073 * From sos@freebsd.org: the 0x961 ID will never be found in real world 5074 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW}, 5075 */ 5076 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW}, 5077 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW}, 5078 {PCI_PRODUCT_SIS_964, 0x00, 6, "964", SIS_TYPE_133NEW}, 5079 {PCI_PRODUCT_SIS_965, 0x00, 6, "965", SIS_TYPE_133NEW}, 5080 {PCI_PRODUCT_SIS_966, 0x00, 6, "966", SIS_TYPE_133NEW}, 5081 {PCI_PRODUCT_SIS_968, 0x00, 6, "968", SIS_TYPE_133NEW} 5082 }; 5083 5084 static struct sis_hostbr_type *sis_hostbr_type_match; 5085 5086 int 5087 sis_hostbr_match(struct pci_attach_args *pa) 5088 { 5089 int i; 5090 5091 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS) 5092 return (0); 5093 sis_hostbr_type_match = NULL; 5094 for (i = 0; 5095 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]); 5096 i++) { 5097 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id && 5098 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev) 5099 sis_hostbr_type_match = &sis_hostbr_type[i]; 5100 } 5101 return (sis_hostbr_type_match != NULL); 5102 } 5103 5104 int 5105 sis_south_match(struct pci_attach_args *pa) 5106 { 5107 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS && 5108 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 && 5109 PCI_REVISION(pa->pa_class) >= 0x10); 5110 } 5111 5112 void 5113 sis_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5114 { 5115 struct pciide_channel *cp; 5116 int channel; 5117 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 5118 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 5119 int rev = sc->sc_rev; 5120 bus_size_t cmdsize, ctlsize; 5121 struct pciide_sis *sis; 5122 5123 /* Allocate memory for private data */ 5124 sc->sc_cookie = malloc(sizeof(*sis), M_DEVBUF, M_NOWAIT | M_ZERO); 5125 sis = sc->sc_cookie; 5126 5127 pci_find_device(NULL, sis_hostbr_match); 5128 5129 if (sis_hostbr_type_match) { 5130 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) { 5131 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57, 5132 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5133 SIS_REG_57) & 0x7f); 5134 if (sc->sc_pp->ide_product == SIS_PRODUCT_5518) { 5135 sis->sis_type = SIS_TYPE_133NEW; 5136 sc->sc_wdcdev.UDMA_cap = 5137 sis_hostbr_type_match->udma_mode; 5138 } else { 5139 if (pci_find_device(NULL, sis_south_match)) { 5140 sis->sis_type = SIS_TYPE_133OLD; 5141 sc->sc_wdcdev.UDMA_cap = 5142 sis_hostbr_type_match->udma_mode; 5143 } else { 5144 sis->sis_type = SIS_TYPE_100NEW; 5145 sc->sc_wdcdev.UDMA_cap = 5146 sis_hostbr_type_match->udma_mode; 5147 } 5148 } 5149 } else { 5150 sis->sis_type = sis_hostbr_type_match->type; 5151 sc->sc_wdcdev.UDMA_cap = 5152 sis_hostbr_type_match->udma_mode; 5153 } 5154 printf(": %s", sis_hostbr_type_match->name); 5155 } else { 5156 printf(": 5597/5598"); 5157 if (rev >= 0xd0) { 5158 sc->sc_wdcdev.UDMA_cap = 2; 5159 sis->sis_type = SIS_TYPE_66; 5160 } else { 5161 sc->sc_wdcdev.UDMA_cap = 0; 5162 sis->sis_type = SIS_TYPE_NOUDMA; 5163 } 5164 } 5165 5166 printf(": DMA"); 5167 pciide_mapreg_dma(sc, pa); 5168 5169 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5170 WDC_CAPABILITY_MODE; 5171 if (sc->sc_dma_ok) { 5172 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5173 sc->sc_wdcdev.irqack = pciide_irqack; 5174 if (sis->sis_type >= SIS_TYPE_66) 5175 
sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5176 } 5177 5178 sc->sc_wdcdev.PIO_cap = 4; 5179 sc->sc_wdcdev.DMA_cap = 2; 5180 5181 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5182 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5183 switch (sis->sis_type) { 5184 case SIS_TYPE_NOUDMA: 5185 case SIS_TYPE_66: 5186 case SIS_TYPE_100OLD: 5187 sc->sc_wdcdev.set_modes = sis_setup_channel; 5188 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 5189 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 5190 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC); 5191 break; 5192 case SIS_TYPE_100NEW: 5193 case SIS_TYPE_133OLD: 5194 sc->sc_wdcdev.set_modes = sis_setup_channel; 5195 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49, 5196 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01); 5197 break; 5198 case SIS_TYPE_133NEW: 5199 sc->sc_wdcdev.set_modes = sis96x_setup_channel; 5200 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 5201 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 5202 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 5203 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 5204 break; 5205 } 5206 5207 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5208 5209 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5210 cp = &sc->pciide_channels[channel]; 5211 if (pciide_chansetup(sc, channel, interface) == 0) 5212 continue; 5213 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 5214 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 5215 printf("%s: %s ignored (disabled)\n", 5216 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5217 continue; 5218 } 5219 pciide_map_compat_intr(pa, cp, channel, interface); 5220 if (cp->hw_ok == 0) 5221 continue; 5222 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5223 pciide_pci_intr); 5224 if (cp->hw_ok == 0) { 5225 pciide_unmap_compat_intr(pa, cp, channel, interface); 5226 continue; 5227 } 5228 if (pciide_chan_candisable(cp)) { 5229 if (channel == 0) 5230 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 5231 else 5232 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 5233 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 5234 sis_ctr0); 5235 } 5236 if (cp->hw_ok == 0) { 5237 pciide_unmap_compat_intr(pa, cp, channel, interface); 5238 continue; 5239 } 5240 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5241 } 5242 } 5243 5244 void 5245 sis96x_setup_channel(struct channel_softc *chp) 5246 { 5247 struct ata_drive_datas *drvp; 5248 int drive; 5249 u_int32_t sis_tim; 5250 u_int32_t idedma_ctl; 5251 int regtim; 5252 struct pciide_channel *cp = (struct pciide_channel *)chp; 5253 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5254 5255 sis_tim = 0; 5256 idedma_ctl = 0; 5257 /* setup DMA if needed */ 5258 pciide_channel_dma_setup(cp); 5259 5260 for (drive = 0; drive < 2; drive++) { 5261 regtim = SIS_TIM133( 5262 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 5263 chp->channel, drive); 5264 drvp = &chp->ch_drive[drive]; 5265 /* If no drive, skip */ 5266 if ((drvp->drive_flags & DRIVE) == 0) 5267 continue; 5268 /* add timing values, setup DMA if needed */ 5269 if (drvp->drive_flags & DRIVE_UDMA) { 5270 /* use Ultra/DMA */ 5271 drvp->drive_flags &= ~DRIVE_DMA; 5272 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5273 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 5274 if (drvp->UDMA_mode > 2) 5275 drvp->UDMA_mode = 2; 5276 } 5277 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 5278 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5279 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5280 } else if 
(drvp->drive_flags & DRIVE_DMA) { 5281 /* 5282 * use Multiword DMA 5283 * Timings will be used for both PIO and DMA, 5284 * so adjust DMA mode if needed 5285 */ 5286 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5287 drvp->PIO_mode = drvp->DMA_mode + 2; 5288 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5289 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 5290 drvp->PIO_mode - 2 : 0; 5291 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 5292 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5293 } else { 5294 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5295 } 5296 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 5297 "channel %d drive %d: 0x%x (reg 0x%x)\n", 5298 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 5299 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 5300 } 5301 if (idedma_ctl != 0) { 5302 /* Add software bits in status register */ 5303 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5304 IDEDMA_CTL(chp->channel), idedma_ctl); 5305 } 5306 pciide_print_modes(cp); 5307 } 5308 5309 void 5310 sis_setup_channel(struct channel_softc *chp) 5311 { 5312 struct ata_drive_datas *drvp; 5313 int drive; 5314 u_int32_t sis_tim; 5315 u_int32_t idedma_ctl; 5316 struct pciide_channel *cp = (struct pciide_channel *)chp; 5317 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5318 struct pciide_sis *sis = sc->sc_cookie; 5319 5320 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 5321 "channel %d 0x%x\n", chp->channel, 5322 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 5323 DEBUG_PROBE); 5324 sis_tim = 0; 5325 idedma_ctl = 0; 5326 /* setup DMA if needed */ 5327 pciide_channel_dma_setup(cp); 5328 5329 for (drive = 0; drive < 2; drive++) { 5330 drvp = &chp->ch_drive[drive]; 5331 /* If no drive, skip */ 5332 if ((drvp->drive_flags & DRIVE) == 0) 5333 continue; 5334 /* add timing values, setup DMA if needed */ 5335 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5336 (drvp->drive_flags & DRIVE_UDMA) == 0) 5337 goto pio; 5338 5339 if (drvp->drive_flags & DRIVE_UDMA) { 5340 /* use Ultra/DMA */ 5341 drvp->drive_flags &= ~DRIVE_DMA; 5342 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5343 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 5344 if (drvp->UDMA_mode > 2) 5345 drvp->UDMA_mode = 2; 5346 } 5347 switch (sis->sis_type) { 5348 case SIS_TYPE_66: 5349 case SIS_TYPE_100OLD: 5350 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 5351 SIS_TIM66_UDMA_TIME_OFF(drive); 5352 break; 5353 case SIS_TYPE_100NEW: 5354 sis_tim |= 5355 sis_udma100new_tim[drvp->UDMA_mode] << 5356 SIS_TIM100_UDMA_TIME_OFF(drive); 5357 break; 5358 case SIS_TYPE_133OLD: 5359 sis_tim |= 5360 sis_udma133old_tim[drvp->UDMA_mode] << 5361 SIS_TIM100_UDMA_TIME_OFF(drive); 5362 break; 5363 default: 5364 printf("unknown SiS IDE type %d\n", 5365 sis->sis_type); 5366 } 5367 } else { 5368 /* 5369 * use Multiword DMA 5370 * Timings will be used for both PIO and DMA, 5371 * so adjust DMA mode if needed 5372 */ 5373 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5374 drvp->PIO_mode = drvp->DMA_mode + 2; 5375 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5376 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5377 drvp->PIO_mode - 2 : 0; 5378 if (drvp->DMA_mode == 0) 5379 drvp->PIO_mode = 0; 5380 } 5381 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5382 pio: switch (sis->sis_type) { 5383 case SIS_TYPE_NOUDMA: 5384 case SIS_TYPE_66: 5385 case SIS_TYPE_100OLD: 5386 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5387 SIS_TIM66_ACT_OFF(drive); 5388 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5389 SIS_TIM66_REC_OFF(drive); 5390 break; 5391 case SIS_TYPE_100NEW: 5392 case SIS_TYPE_133OLD: 5393 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5394 SIS_TIM100_ACT_OFF(drive); 5395 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5396 SIS_TIM100_REC_OFF(drive); 5397 break; 5398 default: 5399 printf("unknown SiS IDE type %d\n", 5400 sis->sis_type); 5401 } 5402 } 5403 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 5404 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 5405 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 5406 if (idedma_ctl != 0) { 5407 /* Add software bits in status register */ 5408 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5409 IDEDMA_CTL(chp->channel), idedma_ctl); 5410 } 5411 pciide_print_modes(cp); 5412 } 5413 5414 void 5415 natsemi_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5416 { 5417 struct pciide_channel *cp; 5418 int channel; 5419 pcireg_t interface, ctl; 5420 bus_size_t cmdsize, ctlsize; 5421 5422 printf(": DMA"); 5423 pciide_mapreg_dma(sc, pa); 5424 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 5425 5426 if (sc->sc_dma_ok) { 5427 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5428 sc->sc_wdcdev.irqack = natsemi_irqack; 5429 } 5430 5431 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 5432 5433 /* 5434 * Mask off interrupts from both channels, appropriate channel(s) 5435 * will be unmasked later. 5436 */ 5437 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5438 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 5439 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 5440 5441 sc->sc_wdcdev.PIO_cap = 4; 5442 sc->sc_wdcdev.DMA_cap = 2; 5443 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 5444 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5445 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5446 5447 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5448 PCI_CLASS_REG)); 5449 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 5450 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5451 5452 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
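* Native-PCI channels deliver interrupts through INTA, while channels
* left in compatibility mode use the legacy ISA IRQs, so INTA only
* needs to be unmasked when at least one channel is native.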
*/ 5453 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 5454 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 5455 ctl &= ~NATSEMI_CTRL1_INTAMASK; 5456 else 5457 ctl |= NATSEMI_CTRL1_INTAMASK; 5458 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 5459 5460 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5461 cp = &sc->pciide_channels[channel]; 5462 if (pciide_chansetup(sc, channel, interface) == 0) 5463 continue; 5464 5465 pciide_map_compat_intr(pa, cp, channel, interface); 5466 if (cp->hw_ok == 0) 5467 continue; 5468 5469 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5470 natsemi_pci_intr); 5471 if (cp->hw_ok == 0) { 5472 pciide_unmap_compat_intr(pa, cp, channel, interface); 5473 continue; 5474 } 5475 natsemi_setup_channel(&cp->wdc_channel); 5476 } 5477 } 5478 5479 void 5480 natsemi_setup_channel(struct channel_softc *chp) 5481 { 5482 struct ata_drive_datas *drvp; 5483 int drive, ndrives = 0; 5484 u_int32_t idedma_ctl = 0; 5485 struct pciide_channel *cp = (struct pciide_channel *)chp; 5486 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5487 u_int8_t tim; 5488 5489 /* setup DMA if needed */ 5490 pciide_channel_dma_setup(cp); 5491 5492 for (drive = 0; drive < 2; drive++) { 5493 drvp = &chp->ch_drive[drive]; 5494 /* If no drive, skip */ 5495 if ((drvp->drive_flags & DRIVE) == 0) 5496 continue; 5497 5498 ndrives++; 5499 /* add timing values, setup DMA if needed */ 5500 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 5501 tim = natsemi_pio_pulse[drvp->PIO_mode] | 5502 (natsemi_pio_recover[drvp->PIO_mode] << 4); 5503 } else { 5504 /* 5505 * use Multiword DMA 5506 * Timings will be used for both PIO and DMA, 5507 * so adjust DMA mode if needed 5508 */ 5509 if (drvp->PIO_mode >= 3 && 5510 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5511 drvp->DMA_mode = drvp->PIO_mode - 2; 5512 } 5513 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5514 tim = natsemi_dma_pulse[drvp->DMA_mode] | 5515 (natsemi_dma_recover[drvp->DMA_mode] << 4); 5516 } 5517 5518 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5519 NATSEMI_RTREG(chp->channel, drive), tim); 5520 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5521 NATSEMI_WTREG(chp->channel, drive), tim); 5522 } 5523 if (idedma_ctl != 0) { 5524 /* Add software bits in status register */ 5525 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5526 IDEDMA_CTL(chp->channel), idedma_ctl); 5527 } 5528 if (ndrives > 0) { 5529 /* Unmask the channel if at least one drive is found */ 5530 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5531 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 5532 ~(NATSEMI_CHMASK(chp->channel))); 5533 } 5534 5535 pciide_print_modes(cp); 5536 5537 /* Go ahead and ack interrupts generated during probe. 
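* The interrupt and error bits in IDEDMA_CTL are write-one-to-clear,
* so reading the register and writing the same value straight back
* clears whatever happens to be pending.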
*/
5538 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5539 IDEDMA_CTL(chp->channel),
5540 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5541 IDEDMA_CTL(chp->channel)));
5542 }
5543
5544 void
5545 natsemi_irqack(struct channel_softc *chp)
5546 {
5547 struct pciide_channel *cp = (struct pciide_channel *)chp;
5548 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5549 u_int8_t clr;
5550
5551 /* The "clear" bits are in the wrong register *sigh* */
5552 clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5553 IDEDMA_CMD(chp->channel));
5554 clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5555 IDEDMA_CTL(chp->channel)) &
5556 (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR);
5557 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5558 IDEDMA_CMD(chp->channel), clr);
5559 }
5560
5561 int
5562 natsemi_pci_intr(void *arg)
5563 {
5564 struct pciide_softc *sc = arg;
5565 struct pciide_channel *cp;
5566 struct channel_softc *wdc_cp;
5567 int i, rv, crv;
5568 u_int8_t msk;
5569
5570 rv = 0;
5571 msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2);
5572 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5573 cp = &sc->pciide_channels[i];
5574 wdc_cp = &cp->wdc_channel;
5575
5576 /* If a compat channel skip. */
5577 if (cp->compat)
5578 continue;
5579
5580 /* If this channel is masked, skip it. */
5581 if (msk & NATSEMI_CHMASK(i))
5582 continue;
5583
5584 if (pciide_intr_flag(cp) == 0)
5585 continue;
5586
5587 crv = wdcintr(wdc_cp);
5588 if (crv == 0)
5589 ; /* leave rv alone */
5590 else if (crv == 1)
5591 rv = 1; /* claim the intr */
5592 else if (rv == 0) /* crv should be -1 in this case */
5593 rv = crv; /* if we've done no better, take it */
5594 }
5595 return (rv);
5596 }
5597
5598 void
5599 ns_scx200_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
5600 {
5601 struct pciide_channel *cp;
5602 int channel;
5603 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
5604 bus_size_t cmdsize, ctlsize;
5605
5606 printf(": DMA");
5607 pciide_mapreg_dma(sc, pa);
5608
5609 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5610 WDC_CAPABILITY_MODE;
5611 if (sc->sc_dma_ok) {
5612 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5613 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5614 sc->sc_wdcdev.irqack = pciide_irqack;
5615 }
5616 sc->sc_wdcdev.PIO_cap = 4;
5617 sc->sc_wdcdev.DMA_cap = 2;
5618 sc->sc_wdcdev.UDMA_cap = 2;
5619
5620 sc->sc_wdcdev.set_modes = ns_scx200_setup_channel;
5621 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5622 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5623
5624 /*
5625 * Soekris net4801 errata 0003:
5626 *
5627 * The SC1100 built-in busmaster IDE controller is pretty standard,
5628 * but has two bugs: data transfers need to be dword aligned and
5629 * it cannot do an exact 64Kbyte data transfer.
5630 *
5631 * Assume that reducing the maximum segment size by one page
5632 * will be enough, and restrict the boundary too for extra certainty.
5633 */
5634 if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SCx200_IDE) {
5635 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE;
5636 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE;
5637 }
5638
5639 /*
5640 * This chip seems to be unable to do one-sector transfers
5641 * using DMA.
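 * (The WDC_QUIRK_NOSHORTDMA flag set just below is what asks the wdc
 * layer to avoid DMA for such short transfers.)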
5642 */ 5643 sc->sc_wdcdev.quirks = WDC_QUIRK_NOSHORTDMA; 5644 5645 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5646 5647 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5648 cp = &sc->pciide_channels[channel]; 5649 if (pciide_chansetup(sc, channel, interface) == 0) 5650 continue; 5651 pciide_map_compat_intr(pa, cp, channel, interface); 5652 if (cp->hw_ok == 0) 5653 continue; 5654 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5655 pciide_pci_intr); 5656 if (cp->hw_ok == 0) { 5657 pciide_unmap_compat_intr(pa, cp, channel, interface); 5658 continue; 5659 } 5660 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5661 } 5662 } 5663 5664 void 5665 ns_scx200_setup_channel(struct channel_softc *chp) 5666 { 5667 struct ata_drive_datas *drvp; 5668 int drive, mode; 5669 u_int32_t idedma_ctl; 5670 struct pciide_channel *cp = (struct pciide_channel*)chp; 5671 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5672 int channel = chp->channel; 5673 int pioformat; 5674 pcireg_t piotim, dmatim; 5675 5676 /* Setup DMA if needed */ 5677 pciide_channel_dma_setup(cp); 5678 5679 idedma_ctl = 0; 5680 5681 pioformat = (pci_conf_read(sc->sc_pc, sc->sc_tag, 5682 SCx200_TIM_DMA(0, 0)) >> SCx200_PIOFORMAT_SHIFT) & 0x01; 5683 WDCDEBUG_PRINT(("%s: pio format %d\n", __func__, pioformat), 5684 DEBUG_PROBE); 5685 5686 /* Per channel settings */ 5687 for (drive = 0; drive < 2; drive++) { 5688 drvp = &chp->ch_drive[drive]; 5689 5690 /* If no drive, skip */ 5691 if ((drvp->drive_flags & DRIVE) == 0) 5692 continue; 5693 5694 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5695 SCx200_TIM_PIO(channel, drive)); 5696 dmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5697 SCx200_TIM_DMA(channel, drive)); 5698 WDCDEBUG_PRINT(("%s:%d:%d: piotim=0x%x, dmatim=0x%x\n", 5699 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5700 piotim, dmatim), DEBUG_PROBE); 5701 5702 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 5703 (drvp->drive_flags & DRIVE_UDMA) != 0) { 5704 /* Setup UltraDMA mode */ 5705 drvp->drive_flags &= ~DRIVE_DMA; 5706 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5707 dmatim = scx200_udma33[drvp->UDMA_mode]; 5708 mode = drvp->PIO_mode; 5709 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 5710 (drvp->drive_flags & DRIVE_DMA) != 0) { 5711 /* Setup multiword DMA mode */ 5712 drvp->drive_flags &= ~DRIVE_UDMA; 5713 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5714 dmatim = scx200_dma33[drvp->DMA_mode]; 5715 5716 /* mode = min(pio, dma + 2) */ 5717 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 5718 mode = drvp->PIO_mode; 5719 else 5720 mode = drvp->DMA_mode + 2; 5721 } else { 5722 mode = drvp->PIO_mode; 5723 } 5724 5725 /* Setup PIO mode */ 5726 drvp->PIO_mode = mode; 5727 if (mode < 2) 5728 drvp->DMA_mode = 0; 5729 else 5730 drvp->DMA_mode = mode - 2; 5731 5732 piotim = scx200_pio33[pioformat][drvp->PIO_mode]; 5733 5734 WDCDEBUG_PRINT(("%s:%d:%d: new piotim=0x%x, dmatim=0x%x\n", 5735 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5736 piotim, dmatim), DEBUG_PROBE); 5737 5738 pci_conf_write(sc->sc_pc, sc->sc_tag, 5739 SCx200_TIM_PIO(channel, drive), piotim); 5740 pci_conf_write(sc->sc_pc, sc->sc_tag, 5741 SCx200_TIM_DMA(channel, drive), dmatim); 5742 } 5743 5744 if (idedma_ctl != 0) { 5745 /* Add software bits in status register */ 5746 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5747 IDEDMA_CTL(channel), idedma_ctl); 5748 } 5749 5750 pciide_print_modes(cp); 5751 } 5752 5753 void 5754 acer_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5755 { 5756 struct 
pciide_channel *cp; 5757 int channel; 5758 pcireg_t cr, interface; 5759 bus_size_t cmdsize, ctlsize; 5760 int rev = sc->sc_rev; 5761 5762 printf(": DMA"); 5763 pciide_mapreg_dma(sc, pa); 5764 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5765 WDC_CAPABILITY_MODE; 5766 5767 if (sc->sc_dma_ok) { 5768 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 5769 if (rev >= 0x20) { 5770 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5771 if (rev >= 0xC4) 5772 sc->sc_wdcdev.UDMA_cap = 5; 5773 else if (rev >= 0xC2) 5774 sc->sc_wdcdev.UDMA_cap = 4; 5775 else 5776 sc->sc_wdcdev.UDMA_cap = 2; 5777 } 5778 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5779 sc->sc_wdcdev.irqack = pciide_irqack; 5780 if (rev <= 0xC4) 5781 sc->sc_wdcdev.dma_init = acer_dma_init; 5782 } 5783 5784 sc->sc_wdcdev.PIO_cap = 4; 5785 sc->sc_wdcdev.DMA_cap = 2; 5786 sc->sc_wdcdev.set_modes = acer_setup_channel; 5787 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5788 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5789 5790 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 5791 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 5792 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 5793 5794 /* Enable "microsoft register bits" R/W. */ 5795 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 5796 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 5797 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 5798 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 5799 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 5800 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 5801 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 5802 ~ACER_CHANSTATUSREGS_RO); 5803 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 5804 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 5805 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 5806 /* Don't use cr, re-read the real register content instead */ 5807 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5808 PCI_CLASS_REG)); 5809 5810 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5811 5812 /* From linux: enable "Cable Detection" */ 5813 if (rev >= 0xC2) 5814 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 5815 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 5816 | ACER_0x4B_CDETECT); 5817 5818 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5819 cp = &sc->pciide_channels[channel]; 5820 if (pciide_chansetup(sc, channel, interface) == 0) 5821 continue; 5822 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 5823 printf("%s: %s ignored (disabled)\n", 5824 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5825 continue; 5826 } 5827 pciide_map_compat_intr(pa, cp, channel, interface); 5828 if (cp->hw_ok == 0) 5829 continue; 5830 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5831 (rev >= 0xC2) ? 
pciide_pci_intr : acer_pci_intr); 5832 if (cp->hw_ok == 0) { 5833 pciide_unmap_compat_intr(pa, cp, channel, interface); 5834 continue; 5835 } 5836 if (pciide_chan_candisable(cp)) { 5837 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 5838 pci_conf_write(sc->sc_pc, sc->sc_tag, 5839 PCI_CLASS_REG, cr); 5840 } 5841 if (cp->hw_ok == 0) { 5842 pciide_unmap_compat_intr(pa, cp, channel, interface); 5843 continue; 5844 } 5845 acer_setup_channel(&cp->wdc_channel); 5846 } 5847 } 5848 5849 void 5850 acer_setup_channel(struct channel_softc *chp) 5851 { 5852 struct ata_drive_datas *drvp; 5853 int drive; 5854 u_int32_t acer_fifo_udma; 5855 u_int32_t idedma_ctl; 5856 struct pciide_channel *cp = (struct pciide_channel *)chp; 5857 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5858 5859 idedma_ctl = 0; 5860 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 5861 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 5862 acer_fifo_udma), DEBUG_PROBE); 5863 /* setup DMA if needed */ 5864 pciide_channel_dma_setup(cp); 5865 5866 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 5867 DRIVE_UDMA) { /* check 80 pins cable */ 5868 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 5869 ACER_0x4A_80PIN(chp->channel)) { 5870 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 5871 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 5872 DEBUG_PROBE); 5873 if (chp->ch_drive[0].UDMA_mode > 2) 5874 chp->ch_drive[0].UDMA_mode = 2; 5875 if (chp->ch_drive[1].UDMA_mode > 2) 5876 chp->ch_drive[1].UDMA_mode = 2; 5877 } 5878 } 5879 5880 for (drive = 0; drive < 2; drive++) { 5881 drvp = &chp->ch_drive[drive]; 5882 /* If no drive, skip */ 5883 if ((drvp->drive_flags & DRIVE) == 0) 5884 continue; 5885 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 5886 "channel %d drive %d 0x%x\n", chp->channel, drive, 5887 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5888 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 5889 /* clear FIFO/DMA mode */ 5890 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 5891 ACER_UDMA_EN(chp->channel, drive) | 5892 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 5893 5894 /* add timing values, setup DMA if needed */ 5895 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5896 (drvp->drive_flags & DRIVE_UDMA) == 0) { 5897 acer_fifo_udma |= 5898 ACER_FTH_OPL(chp->channel, drive, 0x1); 5899 goto pio; 5900 } 5901 5902 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 5903 if (drvp->drive_flags & DRIVE_UDMA) { 5904 /* use Ultra/DMA */ 5905 drvp->drive_flags &= ~DRIVE_DMA; 5906 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 5907 acer_fifo_udma |= 5908 ACER_UDMA_TIM(chp->channel, drive, 5909 acer_udma[drvp->UDMA_mode]); 5910 /* XXX disable if one drive < UDMA3 ? */ 5911 if (drvp->UDMA_mode >= 3) { 5912 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5913 ACER_0x4B, 5914 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5915 ACER_0x4B) | ACER_0x4B_UDMA66); 5916 } 5917 } else { 5918 /* 5919 * use Multiword DMA 5920 * Timings will be used for both PIO and DMA, 5921 * so adjust DMA mode if needed 5922 */ 5923 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5924 drvp->PIO_mode = drvp->DMA_mode + 2; 5925 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5926 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5927 drvp->PIO_mode - 2 : 0; 5928 if (drvp->DMA_mode == 0) 5929 drvp->PIO_mode = 0; 5930 } 5931 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5932 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 5933 ACER_IDETIM(chp->channel, drive), 5934 acer_pio[drvp->PIO_mode]); 5935 } 5936 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 5937 acer_fifo_udma), DEBUG_PROBE); 5938 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 5939 if (idedma_ctl != 0) { 5940 /* Add software bits in status register */ 5941 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5942 IDEDMA_CTL(chp->channel), idedma_ctl); 5943 } 5944 pciide_print_modes(cp); 5945 } 5946 5947 int 5948 acer_pci_intr(void *arg) 5949 { 5950 struct pciide_softc *sc = arg; 5951 struct pciide_channel *cp; 5952 struct channel_softc *wdc_cp; 5953 int i, rv, crv; 5954 u_int32_t chids; 5955 5956 rv = 0; 5957 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 5958 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5959 cp = &sc->pciide_channels[i]; 5960 wdc_cp = &cp->wdc_channel; 5961 /* If a compat channel skip. */ 5962 if (cp->compat) 5963 continue; 5964 if (chids & ACER_CHIDS_INT(i)) { 5965 crv = wdcintr(wdc_cp); 5966 if (crv == 0) 5967 printf("%s:%d: bogus intr\n", 5968 sc->sc_wdcdev.sc_dev.dv_xname, i); 5969 else 5970 rv = 1; 5971 } 5972 } 5973 return (rv); 5974 } 5975 5976 int 5977 acer_dma_init(void *v, int channel, int drive, void *databuf, 5978 size_t datalen, int flags) 5979 { 5980 /* Use PIO for LBA48 transfers. */ 5981 if (flags & WDC_DMA_LBA48) 5982 return (EINVAL); 5983 5984 return (pciide_dma_init(v, channel, drive, databuf, datalen, flags)); 5985 } 5986 5987 void 5988 hpt_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5989 { 5990 struct pciide_channel *cp; 5991 int i, compatchan, revision; 5992 pcireg_t interface; 5993 bus_size_t cmdsize, ctlsize; 5994 5995 revision = sc->sc_rev; 5996 5997 /* 5998 * when the chip is in native mode it identifies itself as a 5999 * 'misc mass storage'. Fake interface in this case. 6000 */ 6001 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 6002 interface = PCI_INTERFACE(pa->pa_class); 6003 } else { 6004 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 6005 PCIIDE_INTERFACE_PCI(0); 6006 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 6007 (revision == HPT370_REV || revision == HPT370A_REV || 6008 revision == HPT372_REV)) || 6009 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 6010 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 6011 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 6012 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 6013 interface |= PCIIDE_INTERFACE_PCI(1); 6014 } 6015 6016 printf(": DMA"); 6017 pciide_mapreg_dma(sc, pa); 6018 printf("\n"); 6019 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6020 WDC_CAPABILITY_MODE; 6021 if (sc->sc_dma_ok) { 6022 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6023 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6024 sc->sc_wdcdev.irqack = pciide_irqack; 6025 } 6026 sc->sc_wdcdev.PIO_cap = 4; 6027 sc->sc_wdcdev.DMA_cap = 2; 6028 6029 sc->sc_wdcdev.set_modes = hpt_setup_channel; 6030 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6031 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 6032 revision == HPT366_REV) { 6033 sc->sc_wdcdev.UDMA_cap = 4; 6034 /* 6035 * The 366 has 2 PCI IDE functions, one for primary and one 6036 * for secondary. 
So we need to call pciide_mapregs_compat()
6037 * with the real channel
6038 */
6039 if (pa->pa_function == 0) {
6040 compatchan = 0;
6041 } else if (pa->pa_function == 1) {
6042 compatchan = 1;
6043 } else {
6044 printf("%s: unexpected PCI function %d\n",
6045 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
6046 return;
6047 }
6048 sc->sc_wdcdev.nchannels = 1;
6049 } else {
6050 sc->sc_wdcdev.nchannels = 2;
6051 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A ||
6052 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 ||
6053 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 ||
6054 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
6055 sc->sc_wdcdev.UDMA_cap = 6;
6056 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
6057 if (revision == HPT372_REV)
6058 sc->sc_wdcdev.UDMA_cap = 6;
6059 else
6060 sc->sc_wdcdev.UDMA_cap = 5;
6061 }
6062 }
6063 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
6064 cp = &sc->pciide_channels[i];
6065 compatchan = 0;
6066 if (sc->sc_wdcdev.nchannels > 1) {
6067 compatchan = i;
6068 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
6069 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
6070 printf("%s: %s ignored (disabled)\n",
6071 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
6072 continue;
6073 }
6074 }
6075 if (pciide_chansetup(sc, i, interface) == 0)
6076 continue;
6077 if (interface & PCIIDE_INTERFACE_PCI(i)) {
6078 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
6079 &ctlsize, hpt_pci_intr);
6080 } else {
6081 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
6082 &cmdsize, &ctlsize);
6083 }
6084 if (cp->hw_ok == 0)
6085 return;
6086 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
6087 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
6088 wdcattach(&cp->wdc_channel);
6089 hpt_setup_channel(&cp->wdc_channel);
6090 }
6091 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
6092 (revision == HPT370_REV || revision == HPT370A_REV ||
6093 revision == HPT372_REV)) ||
6094 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A ||
6095 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 ||
6096 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 ||
6097 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
6098 /*
6099 * Turn off fast interrupts
6100 */
6101 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0),
6102 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) &
6103 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ));
6104 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1),
6105 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) &
6106 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ));
6107
6108 /*
6109 * HPT370 and higher have a bit to disable interrupts;
6110 * make sure to clear it
6111 */
6112 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
6113 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
6114 ~HPT_CSEL_IRQDIS);
6115 }
6116 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
6117 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A ||
6118 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 ||
6119 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 ||
6120 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
6121 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
6122 revision == HPT372_REV))
6123 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
6124 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
6125 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
6126
6127 return;
6128 }
6129
6130 void
6131 hpt_setup_channel(struct channel_softc *chp)
6132 {
6133 struct ata_drive_datas
*drvp; 6134 int drive; 6135 int cable; 6136 u_int32_t before, after; 6137 u_int32_t idedma_ctl; 6138 struct pciide_channel *cp = (struct pciide_channel *)chp; 6139 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6140 int revision = sc->sc_rev; 6141 u_int32_t *tim_pio, *tim_dma, *tim_udma; 6142 6143 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 6144 6145 /* setup DMA if needed */ 6146 pciide_channel_dma_setup(cp); 6147 6148 idedma_ctl = 0; 6149 6150 switch (sc->sc_pp->ide_product) { 6151 case PCI_PRODUCT_TRIONES_HPT366: 6152 if (revision == HPT370_REV || 6153 revision == HPT370A_REV) { 6154 tim_pio = hpt370_pio; 6155 tim_dma = hpt370_dma; 6156 tim_udma = hpt370_udma; 6157 } else if (revision == HPT372_REV) { 6158 tim_pio = hpt372_pio; 6159 tim_dma = hpt372_dma; 6160 tim_udma = hpt372_udma; 6161 } else { 6162 tim_pio = hpt366_pio; 6163 tim_dma = hpt366_dma; 6164 tim_udma = hpt366_udma; 6165 } 6166 break; 6167 case PCI_PRODUCT_TRIONES_HPT372A: 6168 case PCI_PRODUCT_TRIONES_HPT302: 6169 case PCI_PRODUCT_TRIONES_HPT371: 6170 tim_pio = hpt372_pio; 6171 tim_dma = hpt372_dma; 6172 tim_udma = hpt372_udma; 6173 break; 6174 case PCI_PRODUCT_TRIONES_HPT374: 6175 tim_pio = hpt374_pio; 6176 tim_dma = hpt374_dma; 6177 tim_udma = hpt374_udma; 6178 break; 6179 default: 6180 printf("%s: no known timing values\n", 6181 sc->sc_wdcdev.sc_dev.dv_xname); 6182 goto end; 6183 } 6184 6185 /* Per drive settings */ 6186 for (drive = 0; drive < 2; drive++) { 6187 drvp = &chp->ch_drive[drive]; 6188 /* If no drive, skip */ 6189 if ((drvp->drive_flags & DRIVE) == 0) 6190 continue; 6191 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 6192 HPT_IDETIM(chp->channel, drive)); 6193 6194 /* add timing values, setup DMA if needed */ 6195 if (drvp->drive_flags & DRIVE_UDMA) { 6196 /* use Ultra/DMA */ 6197 drvp->drive_flags &= ~DRIVE_DMA; 6198 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 6199 drvp->UDMA_mode > 2) { 6200 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6201 "cable not detected\n", drvp->drive_name, 6202 sc->sc_wdcdev.sc_dev.dv_xname, 6203 chp->channel, drive), DEBUG_PROBE); 6204 drvp->UDMA_mode = 2; 6205 } 6206 after = tim_udma[drvp->UDMA_mode]; 6207 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6208 } else if (drvp->drive_flags & DRIVE_DMA) { 6209 /* 6210 * use Multiword DMA. 
6211 * Timings will be used for both PIO and DMA, so adjust 6212 * DMA mode if needed 6213 */ 6214 if (drvp->PIO_mode >= 3 && 6215 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 6216 drvp->DMA_mode = drvp->PIO_mode - 2; 6217 } 6218 after = tim_dma[drvp->DMA_mode]; 6219 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6220 } else { 6221 /* PIO only */ 6222 after = tim_pio[drvp->PIO_mode]; 6223 } 6224 pci_conf_write(sc->sc_pc, sc->sc_tag, 6225 HPT_IDETIM(chp->channel, drive), after); 6226 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 6227 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 6228 after, before), DEBUG_PROBE); 6229 } 6230 end: 6231 if (idedma_ctl != 0) { 6232 /* Add software bits in status register */ 6233 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6234 IDEDMA_CTL(chp->channel), idedma_ctl); 6235 } 6236 pciide_print_modes(cp); 6237 } 6238 6239 int 6240 hpt_pci_intr(void *arg) 6241 { 6242 struct pciide_softc *sc = arg; 6243 struct pciide_channel *cp; 6244 struct channel_softc *wdc_cp; 6245 int rv = 0; 6246 int dmastat, i, crv; 6247 6248 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6249 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6250 IDEDMA_CTL(i)); 6251 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 6252 IDEDMA_CTL_INTR) 6253 continue; 6254 cp = &sc->pciide_channels[i]; 6255 wdc_cp = &cp->wdc_channel; 6256 crv = wdcintr(wdc_cp); 6257 if (crv == 0) { 6258 printf("%s:%d: bogus intr\n", 6259 sc->sc_wdcdev.sc_dev.dv_xname, i); 6260 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6261 IDEDMA_CTL(i), dmastat); 6262 } else 6263 rv = 1; 6264 } 6265 return (rv); 6266 } 6267 6268 /* Macros to test product */ 6269 #define PDC_IS_262(sc) \ 6270 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 6271 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6272 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 6273 #define PDC_IS_265(sc) \ 6274 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6275 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 6276 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6277 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6278 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6279 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6280 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6281 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6282 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6283 #define PDC_IS_268(sc) \ 6284 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6285 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6286 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6287 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6288 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6289 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6290 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6291 #define PDC_IS_269(sc) \ 6292 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6293 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6294 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6295 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6296 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6297 6298 u_int8_t 6299 pdc268_config_read(struct channel_softc *chp, int index) 6300 { 6301 struct pciide_channel *cp = (struct pciide_channel *)chp; 6302 struct 
pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6303 int channel = chp->channel; 6304 6305 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6306 PDC268_INDEX(channel), index); 6307 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6308 PDC268_DATA(channel))); 6309 } 6310 6311 void 6312 pdc202xx_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6313 { 6314 struct pciide_channel *cp; 6315 int channel; 6316 pcireg_t interface, st, mode; 6317 bus_size_t cmdsize, ctlsize; 6318 6319 if (!PDC_IS_268(sc)) { 6320 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6321 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 6322 st), DEBUG_PROBE); 6323 } 6324 6325 /* turn off RAID mode */ 6326 if (!PDC_IS_268(sc)) 6327 st &= ~PDC2xx_STATE_IDERAID; 6328 6329 /* 6330 * can't rely on the PCI_CLASS_REG content if the chip was in raid 6331 * mode. We have to fake interface 6332 */ 6333 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 6334 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 6335 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 6336 6337 printf(": DMA"); 6338 pciide_mapreg_dma(sc, pa); 6339 6340 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6341 WDC_CAPABILITY_MODE; 6342 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 6343 PDC_IS_262(sc)) 6344 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 6345 if (sc->sc_dma_ok) { 6346 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6347 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6348 sc->sc_wdcdev.irqack = pciide_irqack; 6349 } 6350 sc->sc_wdcdev.PIO_cap = 4; 6351 sc->sc_wdcdev.DMA_cap = 2; 6352 if (PDC_IS_269(sc)) 6353 sc->sc_wdcdev.UDMA_cap = 6; 6354 else if (PDC_IS_265(sc)) 6355 sc->sc_wdcdev.UDMA_cap = 5; 6356 else if (PDC_IS_262(sc)) 6357 sc->sc_wdcdev.UDMA_cap = 4; 6358 else 6359 sc->sc_wdcdev.UDMA_cap = 2; 6360 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
6361 pdc20268_setup_channel : pdc202xx_setup_channel; 6362 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6363 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 6364 6365 if (PDC_IS_262(sc)) { 6366 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 6367 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 6368 } 6369 6370 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 6371 if (!PDC_IS_268(sc)) { 6372 /* setup failsafe defaults */ 6373 mode = 0; 6374 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 6375 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 6376 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 6377 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 6378 for (channel = 0; 6379 channel < sc->sc_wdcdev.nchannels; 6380 channel++) { 6381 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6382 "drive 0 initial timings 0x%x, now 0x%x\n", 6383 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6384 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 6385 DEBUG_PROBE); 6386 pci_conf_write(sc->sc_pc, sc->sc_tag, 6387 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 6388 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6389 "drive 1 initial timings 0x%x, now 0x%x\n", 6390 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6391 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 6392 pci_conf_write(sc->sc_pc, sc->sc_tag, 6393 PDC2xx_TIM(channel, 1), mode); 6394 } 6395 6396 mode = PDC2xx_SCR_DMA; 6397 if (PDC_IS_262(sc)) { 6398 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 6399 } else { 6400 /* the BIOS set it up this way */ 6401 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 6402 } 6403 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 6404 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 6405 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 6406 "now 0x%x\n", 6407 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6408 PDC2xx_SCR), 6409 mode), DEBUG_PROBE); 6410 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6411 PDC2xx_SCR, mode); 6412 6413 /* controller initial state register is OK even without BIOS */ 6414 /* Set DMA mode to IDE DMA compatibility */ 6415 mode = 6416 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 6417 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 6418 DEBUG_PROBE); 6419 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 6420 mode | 0x1); 6421 mode = 6422 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 6423 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 6424 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 6425 mode | 0x1); 6426 } 6427 6428 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6429 cp = &sc->pciide_channels[channel]; 6430 if (pciide_chansetup(sc, channel, interface) == 0) 6431 continue; 6432 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 6433 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 6434 printf("%s: %s ignored (disabled)\n", 6435 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6436 continue; 6437 } 6438 pciide_map_compat_intr(pa, cp, channel, interface); 6439 if (cp->hw_ok == 0) 6440 continue; 6441 if (PDC_IS_265(sc)) 6442 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6443 pdc20265_pci_intr); 6444 else 6445 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6446 pdc202xx_pci_intr); 6447 if (cp->hw_ok == 0) { 6448 pciide_unmap_compat_intr(pa, cp, channel, interface); 6449 continue; 6450 } 6451 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 6452 st &= ~(PDC_IS_262(sc) ? 
6453 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 6454 pciide_unmap_compat_intr(pa, cp, channel, interface); 6455 } 6456 if (PDC_IS_268(sc)) 6457 pdc20268_setup_channel(&cp->wdc_channel); 6458 else 6459 pdc202xx_setup_channel(&cp->wdc_channel); 6460 } 6461 if (!PDC_IS_268(sc)) { 6462 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 6463 "0x%x\n", st), DEBUG_PROBE); 6464 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 6465 } 6466 return; 6467 } 6468 6469 void 6470 pdc202xx_setup_channel(struct channel_softc *chp) 6471 { 6472 struct ata_drive_datas *drvp; 6473 int drive; 6474 pcireg_t mode, st; 6475 u_int32_t idedma_ctl, scr, atapi; 6476 struct pciide_channel *cp = (struct pciide_channel *)chp; 6477 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6478 int channel = chp->channel; 6479 6480 /* setup DMA if needed */ 6481 pciide_channel_dma_setup(cp); 6482 6483 idedma_ctl = 0; 6484 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 6485 sc->sc_wdcdev.sc_dev.dv_xname, 6486 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 6487 DEBUG_PROBE); 6488 6489 /* Per channel settings */ 6490 if (PDC_IS_262(sc)) { 6491 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6492 PDC262_U66); 6493 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6494 /* Check cable */ 6495 if ((st & PDC262_STATE_80P(channel)) != 0 && 6496 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6497 chp->ch_drive[0].UDMA_mode > 2) || 6498 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6499 chp->ch_drive[1].UDMA_mode > 2))) { 6500 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 6501 sc->sc_wdcdev.sc_dev.dv_xname, channel), 6502 DEBUG_PROBE); 6503 if (chp->ch_drive[0].UDMA_mode > 2) 6504 chp->ch_drive[0].UDMA_mode = 2; 6505 if (chp->ch_drive[1].UDMA_mode > 2) 6506 chp->ch_drive[1].UDMA_mode = 2; 6507 } 6508 /* Trim UDMA mode */ 6509 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6510 chp->ch_drive[0].UDMA_mode <= 2) || 6511 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6512 chp->ch_drive[1].UDMA_mode <= 2)) { 6513 if (chp->ch_drive[0].UDMA_mode > 2) 6514 chp->ch_drive[0].UDMA_mode = 2; 6515 if (chp->ch_drive[1].UDMA_mode > 2) 6516 chp->ch_drive[1].UDMA_mode = 2; 6517 } 6518 /* Set U66 if needed */ 6519 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6520 chp->ch_drive[0].UDMA_mode > 2) || 6521 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6522 chp->ch_drive[1].UDMA_mode > 2)) 6523 scr |= PDC262_U66_EN(channel); 6524 else 6525 scr &= ~PDC262_U66_EN(channel); 6526 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6527 PDC262_U66, scr); 6528 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 6529 sc->sc_wdcdev.sc_dev.dv_xname, channel, 6530 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6531 PDC262_ATAPI(channel))), DEBUG_PROBE); 6532 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 6533 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 6534 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6535 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6536 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 6537 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6538 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6539 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 6540 atapi = 0; 6541 else 6542 atapi = PDC262_ATAPI_UDMA; 6543 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6544 PDC262_ATAPI(channel), atapi); 6545 } 6546 } 6547 for (drive = 0; drive < 2; drive++) { 6548 drvp = &chp->ch_drive[drive]; 6549 /* If no drive, skip */ 6550 if ((drvp->drive_flags & 
DRIVE) == 0) 6551 continue; 6552 mode = 0; 6553 if (drvp->drive_flags & DRIVE_UDMA) { 6554 /* use Ultra/DMA */ 6555 drvp->drive_flags &= ~DRIVE_DMA; 6556 mode = PDC2xx_TIM_SET_MB(mode, 6557 pdc2xx_udma_mb[drvp->UDMA_mode]); 6558 mode = PDC2xx_TIM_SET_MC(mode, 6559 pdc2xx_udma_mc[drvp->UDMA_mode]); 6560 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6561 } else if (drvp->drive_flags & DRIVE_DMA) { 6562 mode = PDC2xx_TIM_SET_MB(mode, 6563 pdc2xx_dma_mb[drvp->DMA_mode]); 6564 mode = PDC2xx_TIM_SET_MC(mode, 6565 pdc2xx_dma_mc[drvp->DMA_mode]); 6566 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6567 } else { 6568 mode = PDC2xx_TIM_SET_MB(mode, 6569 pdc2xx_dma_mb[0]); 6570 mode = PDC2xx_TIM_SET_MC(mode, 6571 pdc2xx_dma_mc[0]); 6572 } 6573 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 6574 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 6575 if (drvp->drive_flags & DRIVE_ATA) 6576 mode |= PDC2xx_TIM_PRE; 6577 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 6578 if (drvp->PIO_mode >= 3) { 6579 mode |= PDC2xx_TIM_IORDY; 6580 if (drive == 0) 6581 mode |= PDC2xx_TIM_IORDYp; 6582 } 6583 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 6584 "timings 0x%x\n", 6585 sc->sc_wdcdev.sc_dev.dv_xname, 6586 chp->channel, drive, mode), DEBUG_PROBE); 6587 pci_conf_write(sc->sc_pc, sc->sc_tag, 6588 PDC2xx_TIM(chp->channel, drive), mode); 6589 } 6590 if (idedma_ctl != 0) { 6591 /* Add software bits in status register */ 6592 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6593 IDEDMA_CTL(channel), idedma_ctl); 6594 } 6595 pciide_print_modes(cp); 6596 } 6597 6598 void 6599 pdc20268_setup_channel(struct channel_softc *chp) 6600 { 6601 struct ata_drive_datas *drvp; 6602 int drive, cable; 6603 u_int32_t idedma_ctl; 6604 struct pciide_channel *cp = (struct pciide_channel *)chp; 6605 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6606 int channel = chp->channel; 6607 6608 /* check 80 pins cable */ 6609 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE; 6610 6611 /* setup DMA if needed */ 6612 pciide_channel_dma_setup(cp); 6613 6614 idedma_ctl = 0; 6615 6616 for (drive = 0; drive < 2; drive++) { 6617 drvp = &chp->ch_drive[drive]; 6618 /* If no drive, skip */ 6619 if ((drvp->drive_flags & DRIVE) == 0) 6620 continue; 6621 if (drvp->drive_flags & DRIVE_UDMA) { 6622 /* use Ultra/DMA */ 6623 drvp->drive_flags &= ~DRIVE_DMA; 6624 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6625 if (cable && drvp->UDMA_mode > 2) { 6626 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6627 "cable not detected\n", drvp->drive_name, 6628 sc->sc_wdcdev.sc_dev.dv_xname, 6629 channel, drive), DEBUG_PROBE); 6630 drvp->UDMA_mode = 2; 6631 } 6632 } else if (drvp->drive_flags & DRIVE_DMA) { 6633 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6634 } 6635 } 6636 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */ 6637 if (idedma_ctl != 0) { 6638 /* Add software bits in status register */ 6639 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6640 IDEDMA_CTL(channel), idedma_ctl); 6641 } 6642 pciide_print_modes(cp); 6643 } 6644 6645 int 6646 pdc202xx_pci_intr(void *arg) 6647 { 6648 struct pciide_softc *sc = arg; 6649 struct pciide_channel *cp; 6650 struct channel_softc *wdc_cp; 6651 int i, rv, crv; 6652 u_int32_t scr; 6653 6654 rv = 0; 6655 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 6656 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6657 cp = &sc->pciide_channels[i]; 6658 wdc_cp = &cp->wdc_channel; 6659 /* If a compat channel skip. 
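 * (Compatibility channels get their interrupts through the legacy
 * handler installed by pciide_map_compat_intr(), not through the
 * native-PCI interrupt being serviced here.)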
*/ 6660 if (cp->compat) 6661 continue; 6662 if (scr & PDC2xx_SCR_INT(i)) { 6663 crv = wdcintr(wdc_cp); 6664 if (crv == 0) 6665 printf("%s:%d: bogus intr (reg 0x%x)\n", 6666 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 6667 else 6668 rv = 1; 6669 } 6670 } 6671 return (rv); 6672 } 6673 6674 int 6675 pdc20265_pci_intr(void *arg) 6676 { 6677 struct pciide_softc *sc = arg; 6678 struct pciide_channel *cp; 6679 struct channel_softc *wdc_cp; 6680 int i, rv, crv; 6681 u_int32_t dmastat; 6682 6683 rv = 0; 6684 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6685 cp = &sc->pciide_channels[i]; 6686 wdc_cp = &cp->wdc_channel; 6687 /* If a compat channel skip. */ 6688 if (cp->compat) 6689 continue; 6690 6691 /* 6692 * In case of shared IRQ check that the interrupt 6693 * was actually generated by this channel. 6694 * Only check the channel that is enabled. 6695 */ 6696 if (cp->hw_ok && PDC_IS_268(sc)) { 6697 if ((pdc268_config_read(wdc_cp, 6698 0x0b) & PDC268_INTR) == 0) 6699 continue; 6700 } 6701 6702 /* 6703 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 6704 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 6705 * So use it instead (requires 2 reg reads instead of 1, 6706 * but we can't do it another way). 6707 */ 6708 dmastat = bus_space_read_1(sc->sc_dma_iot, 6709 sc->sc_dma_ioh, IDEDMA_CTL(i)); 6710 if ((dmastat & IDEDMA_CTL_INTR) == 0) 6711 continue; 6712 6713 crv = wdcintr(wdc_cp); 6714 if (crv == 0) 6715 printf("%s:%d: bogus intr\n", 6716 sc->sc_wdcdev.sc_dev.dv_xname, i); 6717 else 6718 rv = 1; 6719 } 6720 return (rv); 6721 } 6722 6723 void 6724 pdc20262_dma_start(void *v, int channel, int drive) 6725 { 6726 struct pciide_softc *sc = v; 6727 struct pciide_dma_maps *dma_maps = 6728 &sc->pciide_channels[channel].dma_maps[drive]; 6729 u_int8_t clock; 6730 u_int32_t count; 6731 6732 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6733 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6734 PDC262_U66); 6735 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6736 PDC262_U66, clock | PDC262_U66_EN(channel)); 6737 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 6738 count |= dma_maps->dma_flags & WDC_DMA_READ ? 
6739 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 6740 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6741 PDC262_ATAPI(channel), count); 6742 } 6743 6744 pciide_dma_start(v, channel, drive); 6745 } 6746 6747 int 6748 pdc20262_dma_finish(void *v, int channel, int drive, int force) 6749 { 6750 struct pciide_softc *sc = v; 6751 struct pciide_dma_maps *dma_maps = 6752 &sc->pciide_channels[channel].dma_maps[drive]; 6753 u_int8_t clock; 6754 6755 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6756 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6757 PDC262_U66); 6758 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6759 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 6760 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6761 PDC262_ATAPI(channel), 0); 6762 } 6763 6764 return (pciide_dma_finish(v, channel, drive, force)); 6765 } 6766 6767 void 6768 pdcsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6769 { 6770 struct pciide_channel *cp; 6771 struct channel_softc *wdc_cp; 6772 struct pciide_pdcsata *ps; 6773 int channel, i; 6774 bus_size_t dmasize; 6775 pci_intr_handle_t intrhandle; 6776 const char *intrstr; 6777 6778 /* Allocate memory for private data */ 6779 sc->sc_cookie = malloc(sizeof(*ps), M_DEVBUF, M_NOWAIT | M_ZERO); 6780 ps = sc->sc_cookie; 6781 6782 /* 6783 * Promise SATA controllers have 3 or 4 channels, 6784 * the usual IDE registers are mapped in I/O space, with offsets. 6785 */ 6786 if (pci_intr_map(pa, &intrhandle) != 0) { 6787 printf(": couldn't map interrupt\n"); 6788 return; 6789 } 6790 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 6791 6792 switch (sc->sc_pp->ide_product) { 6793 case PCI_PRODUCT_PROMISE_PDC20318: 6794 case PCI_PRODUCT_PROMISE_PDC20319: 6795 case PCI_PRODUCT_PROMISE_PDC20371: 6796 case PCI_PRODUCT_PROMISE_PDC20375: 6797 case PCI_PRODUCT_PROMISE_PDC20376: 6798 case PCI_PRODUCT_PROMISE_PDC20377: 6799 case PCI_PRODUCT_PROMISE_PDC20378: 6800 case PCI_PRODUCT_PROMISE_PDC20379: 6801 default: 6802 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6803 intrhandle, IPL_BIO, pdc203xx_pci_intr, sc, 6804 sc->sc_wdcdev.sc_dev.dv_xname); 6805 break; 6806 6807 case PCI_PRODUCT_PROMISE_PDC40518: 6808 case PCI_PRODUCT_PROMISE_PDC40519: 6809 case PCI_PRODUCT_PROMISE_PDC40718: 6810 case PCI_PRODUCT_PROMISE_PDC40719: 6811 case PCI_PRODUCT_PROMISE_PDC40779: 6812 case PCI_PRODUCT_PROMISE_PDC20571: 6813 case PCI_PRODUCT_PROMISE_PDC20575: 6814 case PCI_PRODUCT_PROMISE_PDC20579: 6815 case PCI_PRODUCT_PROMISE_PDC20771: 6816 case PCI_PRODUCT_PROMISE_PDC20775: 6817 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6818 intrhandle, IPL_BIO, pdc205xx_pci_intr, sc, 6819 sc->sc_wdcdev.sc_dev.dv_xname); 6820 break; 6821 } 6822 6823 if (sc->sc_pci_ih == NULL) { 6824 printf(": couldn't establish native-PCI interrupt"); 6825 if (intrstr != NULL) 6826 printf(" at %s", intrstr); 6827 printf("\n"); 6828 return; 6829 } 6830 6831 sc->sc_dma_ok = (pci_mapreg_map(pa, PCIIDE_REG_BUS_MASTER_DMA, 6832 PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_dma_iot, 6833 &sc->sc_dma_ioh, NULL, &dmasize, 0) == 0); 6834 if (!sc->sc_dma_ok) { 6835 printf(": couldn't map bus-master DMA registers\n"); 6836 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6837 return; 6838 } 6839 6840 sc->sc_dmat = pa->pa_dmat; 6841 6842 if (pci_mapreg_map(pa, PDC203xx_BAR_IDEREGS, 6843 PCI_MAPREG_MEM_TYPE_32BIT, 0, &ps->ba5_st, 6844 &ps->ba5_sh, NULL, NULL, 0) != 0) { 6845 printf(": couldn't map IDE registers\n"); 6846 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, dmasize); 6847 pci_intr_disestablish(pa->pa_pc, 
sc->sc_pci_ih); 6848 return; 6849 } 6850 6851 printf(": DMA\n"); 6852 6853 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 6854 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6855 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6856 sc->sc_wdcdev.irqack = pdc203xx_irqack; 6857 sc->sc_wdcdev.PIO_cap = 4; 6858 sc->sc_wdcdev.DMA_cap = 2; 6859 sc->sc_wdcdev.UDMA_cap = 6; 6860 sc->sc_wdcdev.set_modes = pdc203xx_setup_channel; 6861 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6862 6863 switch (sc->sc_pp->ide_product) { 6864 case PCI_PRODUCT_PROMISE_PDC20318: 6865 case PCI_PRODUCT_PROMISE_PDC20319: 6866 case PCI_PRODUCT_PROMISE_PDC20371: 6867 case PCI_PRODUCT_PROMISE_PDC20375: 6868 case PCI_PRODUCT_PROMISE_PDC20376: 6869 case PCI_PRODUCT_PROMISE_PDC20377: 6870 case PCI_PRODUCT_PROMISE_PDC20378: 6871 case PCI_PRODUCT_PROMISE_PDC20379: 6872 default: 6873 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x06c, 0x00ff0033); 6874 sc->sc_wdcdev.nchannels = 6875 (bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x48) & 0x02) ? 6876 PDC203xx_NCHANNELS : 3; 6877 break; 6878 6879 case PCI_PRODUCT_PROMISE_PDC40518: 6880 case PCI_PRODUCT_PROMISE_PDC40519: 6881 case PCI_PRODUCT_PROMISE_PDC40718: 6882 case PCI_PRODUCT_PROMISE_PDC40719: 6883 case PCI_PRODUCT_PROMISE_PDC40779: 6884 case PCI_PRODUCT_PROMISE_PDC20571: 6885 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6886 sc->sc_wdcdev.nchannels = PDC40718_NCHANNELS; 6887 6888 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6889 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6890 6891 break; 6892 case PCI_PRODUCT_PROMISE_PDC20575: 6893 case PCI_PRODUCT_PROMISE_PDC20579: 6894 case PCI_PRODUCT_PROMISE_PDC20771: 6895 case PCI_PRODUCT_PROMISE_PDC20775: 6896 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6897 sc->sc_wdcdev.nchannels = PDC20575_NCHANNELS; 6898 6899 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6900 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6901 6902 break; 6903 } 6904 6905 sc->sc_wdcdev.dma_arg = sc; 6906 sc->sc_wdcdev.dma_init = pciide_dma_init; 6907 sc->sc_wdcdev.dma_start = pdc203xx_dma_start; 6908 sc->sc_wdcdev.dma_finish = pdc203xx_dma_finish; 6909 6910 for (channel = 0; channel < sc->sc_wdcdev.nchannels; 6911 channel++) { 6912 cp = &sc->pciide_channels[channel]; 6913 sc->wdc_chanarray[channel] = &cp->wdc_channel; 6914 6915 cp->ih = sc->sc_pci_ih; 6916 cp->name = NULL; 6917 cp->wdc_channel.channel = channel; 6918 cp->wdc_channel.wdc = &sc->sc_wdcdev; 6919 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 6920 if (cp->wdc_channel.ch_queue == NULL) { 6921 printf("%s: channel %d: " 6922 "cannot allocate channel queue\n", 6923 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6924 continue; 6925 } 6926 wdc_cp = &cp->wdc_channel; 6927 6928 ps->regs[channel].ctl_iot = ps->ba5_st; 6929 ps->regs[channel].cmd_iot = ps->ba5_st; 6930 6931 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6932 0x0238 + (channel << 7), 1, 6933 &ps->regs[channel].ctl_ioh) != 0) { 6934 printf("%s: couldn't map channel %d ctl regs\n", 6935 sc->sc_wdcdev.sc_dev.dv_xname, 6936 channel); 6937 continue; 6938 } 6939 for (i = 0; i < WDC_NREG; i++) { 6940 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6941 0x0200 + (i << 2) + (channel << 7), i == 0 ? 
4 : 1,
6942 &ps->regs[channel].cmd_iohs[i]) != 0) {
6943 printf("%s: couldn't map channel %d cmd "
6944 "regs\n",
6945 sc->sc_wdcdev.sc_dev.dv_xname,
6946 channel);
6947 continue;
6948 }
6949 }
6950 ps->regs[channel].cmd_iohs[wdr_status & _WDC_REGMASK] =
6951 ps->regs[channel].cmd_iohs[wdr_command & _WDC_REGMASK];
6952 ps->regs[channel].cmd_iohs[wdr_features & _WDC_REGMASK] =
6953 ps->regs[channel].cmd_iohs[wdr_error & _WDC_REGMASK];
6954 wdc_cp->data32iot = wdc_cp->cmd_iot =
6955 ps->regs[channel].cmd_iot;
6956 wdc_cp->data32ioh = wdc_cp->cmd_ioh =
6957 ps->regs[channel].cmd_iohs[0];
6958 wdc_cp->_vtbl = &wdc_pdc203xx_vtbl;
6959
6960 /*
6961 * Subregion the busmaster registers. They're spread all over
6962 * the controller's register space :(. They are also 4 bytes
6963 * wide, with some specific extensions in the extra bits.
6964 * It also seems that the IDEDMA_CTL register isn't available.
6965 */
6966 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh,
6967 0x260 + (channel << 7), 1,
6968 &ps->regs[channel].dma_iohs[IDEDMA_CMD(0)]) != 0) {
6969 printf("%s channel %d: can't subregion DMA "
6970 "registers\n",
6971 sc->sc_wdcdev.sc_dev.dv_xname, channel);
6972 continue;
6973 }
6974 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh,
6975 0x244 + (channel << 7), 4,
6976 &ps->regs[channel].dma_iohs[IDEDMA_TBL(0)]) != 0) {
6977 printf("%s channel %d: can't subregion DMA "
6978 "registers\n",
6979 sc->sc_wdcdev.sc_dev.dv_xname, channel);
6980 continue;
6981 }
6982
6983 wdcattach(wdc_cp);
6984 bus_space_write_4(sc->sc_dma_iot,
6985 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0,
6986 (bus_space_read_4(sc->sc_dma_iot,
6987 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)],
6988 0) & ~0x00003f9f) | (channel + 1));
6989 bus_space_write_4(ps->ba5_st, ps->ba5_sh,
6990 (channel + 1) << 2, 0x00000001);
6991
6992 pdc203xx_setup_channel(&cp->wdc_channel);
6993 }
6994
6995 printf("%s: using %s for native-PCI interrupt\n",
6996 sc->sc_wdcdev.sc_dev.dv_xname,
6997 intrstr ?
intrstr : "unknown interrupt"); 6998 } 6999 7000 void 7001 pdc203xx_setup_channel(struct channel_softc *chp) 7002 { 7003 struct ata_drive_datas *drvp; 7004 struct pciide_channel *cp = (struct pciide_channel *)chp; 7005 int drive, s; 7006 7007 pciide_channel_dma_setup(cp); 7008 7009 for (drive = 0; drive < 2; drive++) { 7010 drvp = &chp->ch_drive[drive]; 7011 if ((drvp->drive_flags & DRIVE) == 0) 7012 continue; 7013 if (drvp->drive_flags & DRIVE_UDMA) { 7014 s = splbio(); 7015 drvp->drive_flags &= ~DRIVE_DMA; 7016 splx(s); 7017 } 7018 } 7019 pciide_print_modes(cp); 7020 } 7021 7022 int 7023 pdc203xx_pci_intr(void *arg) 7024 { 7025 struct pciide_softc *sc = arg; 7026 struct pciide_channel *cp; 7027 struct channel_softc *wdc_cp; 7028 struct pciide_pdcsata *ps = sc->sc_cookie; 7029 int i, rv, crv; 7030 u_int32_t scr; 7031 7032 rv = 0; 7033 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x00040); 7034 7035 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7036 cp = &sc->pciide_channels[i]; 7037 wdc_cp = &cp->wdc_channel; 7038 if (scr & (1 << (i + 1))) { 7039 crv = wdcintr(wdc_cp); 7040 if (crv == 0) { 7041 printf("%s:%d: bogus intr (reg 0x%x)\n", 7042 sc->sc_wdcdev.sc_dev.dv_xname, 7043 i, scr); 7044 } else 7045 rv = 1; 7046 } 7047 } 7048 7049 return (rv); 7050 } 7051 7052 int 7053 pdc205xx_pci_intr(void *arg) 7054 { 7055 struct pciide_softc *sc = arg; 7056 struct pciide_channel *cp; 7057 struct channel_softc *wdc_cp; 7058 struct pciide_pdcsata *ps = sc->sc_cookie; 7059 int i, rv, crv; 7060 u_int32_t scr, status; 7061 7062 rv = 0; 7063 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x40); 7064 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x40, scr & 0x0000ffff); 7065 7066 status = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x60); 7067 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, status & 0x000000ff); 7068 7069 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7070 cp = &sc->pciide_channels[i]; 7071 wdc_cp = &cp->wdc_channel; 7072 if (scr & (1 << (i + 1))) { 7073 crv = wdcintr(wdc_cp); 7074 if (crv == 0) { 7075 printf("%s:%d: bogus intr (reg 0x%x)\n", 7076 sc->sc_wdcdev.sc_dev.dv_xname, 7077 i, scr); 7078 } else 7079 rv = 1; 7080 } 7081 } 7082 return rv; 7083 } 7084 7085 void 7086 pdc203xx_irqack(struct channel_softc *chp) 7087 { 7088 struct pciide_channel *cp = (struct pciide_channel *)chp; 7089 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7090 struct pciide_pdcsata *ps = sc->sc_cookie; 7091 int chan = chp->channel; 7092 7093 bus_space_write_4(sc->sc_dma_iot, 7094 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, 7095 (bus_space_read_4(sc->sc_dma_iot, 7096 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 7097 0) & ~0x00003f9f) | (chan + 1)); 7098 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 7099 (chan + 1) << 2, 0x00000001); 7100 } 7101 7102 void 7103 pdc203xx_dma_start(void *v, int channel, int drive) 7104 { 7105 struct pciide_softc *sc = v; 7106 struct pciide_channel *cp = &sc->pciide_channels[channel]; 7107 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 7108 struct pciide_pdcsata *ps = sc->sc_cookie; 7109 7110 /* Write table address */ 7111 bus_space_write_4(sc->sc_dma_iot, 7112 ps->regs[channel].dma_iohs[IDEDMA_TBL(0)], 0, 7113 dma_maps->dmamap_table->dm_segs[0].ds_addr); 7114 7115 /* Start DMA engine */ 7116 bus_space_write_4(sc->sc_dma_iot, 7117 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7118 (bus_space_read_4(sc->sc_dma_iot, 7119 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7120 0) & ~0xc0) | ((dma_maps->dma_flags & WDC_DMA_READ) ? 
0x80 : 0xc0)); 7121 } 7122 7123 int 7124 pdc203xx_dma_finish(void *v, int channel, int drive, int force) 7125 { 7126 struct pciide_softc *sc = v; 7127 struct pciide_channel *cp = &sc->pciide_channels[channel]; 7128 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 7129 struct pciide_pdcsata *ps = sc->sc_cookie; 7130 7131 /* Stop DMA channel */ 7132 bus_space_write_4(sc->sc_dma_iot, 7133 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7134 (bus_space_read_4(sc->sc_dma_iot, 7135 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7136 0) & ~0x80)); 7137 7138 /* Unload the map of the data buffer */ 7139 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 7140 dma_maps->dmamap_xfer->dm_mapsize, 7141 (dma_maps->dma_flags & WDC_DMA_READ) ? 7142 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 7143 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 7144 7145 return (0); 7146 } 7147 7148 u_int8_t 7149 pdc203xx_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7150 { 7151 struct pciide_channel *cp = (struct pciide_channel *)chp; 7152 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7153 struct pciide_pdcsata *ps = sc->sc_cookie; 7154 u_int8_t val; 7155 7156 if (reg & _WDC_AUX) { 7157 return (bus_space_read_1(ps->regs[chp->channel].ctl_iot, 7158 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 7159 } else { 7160 val = bus_space_read_1(ps->regs[chp->channel].cmd_iot, 7161 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0); 7162 return (val); 7163 } 7164 } 7165 7166 void 7167 pdc203xx_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7168 { 7169 struct pciide_channel *cp = (struct pciide_channel *)chp; 7170 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7171 struct pciide_pdcsata *ps = sc->sc_cookie; 7172 7173 if (reg & _WDC_AUX) 7174 bus_space_write_1(ps->regs[chp->channel].ctl_iot, 7175 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 7176 else 7177 bus_space_write_1(ps->regs[chp->channel].cmd_iot, 7178 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 7179 0, val); 7180 } 7181 7182 void 7183 pdc205xx_do_reset(struct channel_softc *chp) 7184 { 7185 struct pciide_channel *cp = (struct pciide_channel *)chp; 7186 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7187 struct pciide_pdcsata *ps = sc->sc_cookie; 7188 u_int32_t scontrol; 7189 7190 wdc_do_reset(chp); 7191 7192 /* reset SATA */ 7193 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 7194 SCONTROL_WRITE(ps, chp->channel, scontrol); 7195 delay(50*1000); 7196 7197 scontrol &= ~SControl_DET_INIT; 7198 SCONTROL_WRITE(ps, chp->channel, scontrol); 7199 delay(50*1000); 7200 } 7201 7202 void 7203 pdc205xx_drv_probe(struct channel_softc *chp) 7204 { 7205 struct pciide_channel *cp = (struct pciide_channel *)chp; 7206 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7207 struct pciide_pdcsata *ps = sc->sc_cookie; 7208 bus_space_handle_t *iohs; 7209 u_int32_t scontrol, sstatus; 7210 u_int16_t scnt, sn, cl, ch; 7211 int s; 7212 7213 SCONTROL_WRITE(ps, chp->channel, 0); 7214 delay(50*1000); 7215 7216 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 7217 SCONTROL_WRITE(ps,chp->channel,scontrol); 7218 delay(50*1000); 7219 7220 scontrol &= ~SControl_DET_INIT; 7221 SCONTROL_WRITE(ps,chp->channel,scontrol); 7222 delay(50*1000); 7223 7224 sstatus = SSTATUS_READ(ps,chp->channel); 7225 7226 switch (sstatus & SStatus_DET_mask) { 7227 case SStatus_DET_NODEV: 7228 /* No Device; be silent. 
*/ 7229 break; 7230 7231 case SStatus_DET_DEV_NE: 7232 printf("%s: port %d: device connected, but " 7233 "communication not established\n", 7234 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7235 break; 7236 7237 case SStatus_DET_OFFLINE: 7238 printf("%s: port %d: PHY offline\n", 7239 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7240 break; 7241 7242 case SStatus_DET_DEV: 7243 iohs = ps->regs[chp->channel].cmd_iohs; 7244 bus_space_write_1(chp->cmd_iot, iohs[wdr_sdh], 0, 7245 WDSD_IBM); 7246 delay(10); /* 400ns delay */ 7247 scnt = bus_space_read_2(chp->cmd_iot, iohs[wdr_seccnt], 0); 7248 sn = bus_space_read_2(chp->cmd_iot, iohs[wdr_sector], 0); 7249 cl = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_lo], 0); 7250 ch = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_hi], 0); 7251 #if 0 7252 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7253 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7254 scnt, sn, cl, ch); 7255 #endif 7256 /* 7257 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7258 * cases we get wrong values here, so ignore it. 7259 */ 7260 s = splbio(); 7261 if (cl == 0x14 && ch == 0xeb) 7262 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7263 else 7264 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7265 splx(s); 7266 #if 0 7267 printf("%s: port %d: device present", 7268 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7269 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7270 case 1: 7271 printf(", speed: 1.5Gb/s"); 7272 break; 7273 case 2: 7274 printf(", speed: 3.0Gb/s"); 7275 break; 7276 } 7277 printf("\n"); 7278 #endif 7279 break; 7280 7281 default: 7282 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7283 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7284 } 7285 } 7286 7287 #ifdef notyet 7288 /* 7289 * Inline functions for accessing the timing registers of the 7290 * OPTi controller. 7291 * 7292 * These *MUST* disable interrupts as they need atomic access to 7293 * certain magic registers. Failure to adhere to this *will* 7294 * break things in subtle ways if the wdc registers are accessed 7295 * by an interrupt routine while this magic sequence is executing. 
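 *
 * (The sequence, as implemented below: two 16-bit reads of the
 * features register, an 8-bit write of 0x03 to the sector count
 * register to expose the configuration registers, the access itself,
 * and finally a write of 0x83 to restore the normal register file.)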
7296 */ 7297 static __inline__ u_int8_t 7298 opti_read_config(struct channel_softc *chp, int reg) 7299 { 7300 u_int8_t rv; 7301 int s = splhigh(); 7302 7303 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7304 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7305 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7306 7307 /* Followed by an 8-bit write of 0x3 to register #2 */ 7308 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7309 7310 /* Now we can read the required register */ 7311 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 7312 7313 /* Restore the real registers */ 7314 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7315 7316 splx(s); 7317 7318 return (rv); 7319 } 7320 7321 static __inline__ void 7322 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 7323 { 7324 int s = splhigh(); 7325 7326 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7327 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7328 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7329 7330 /* Followed by an 8-bit write of 0x3 to register #2 */ 7331 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7332 7333 /* Now we can write the required register */ 7334 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 7335 7336 /* Restore the real registers */ 7337 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7338 7339 splx(s); 7340 } 7341 7342 void 7343 opti_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7344 { 7345 struct pciide_channel *cp; 7346 bus_size_t cmdsize, ctlsize; 7347 pcireg_t interface; 7348 u_int8_t init_ctrl; 7349 int channel; 7350 7351 printf(": DMA"); 7352 /* 7353 * XXXSCW: 7354 * There seem to be a couple of buggy revisions/implementations 7355 * of the OPTi pciide chipset. This kludge seems to fix one of 7356 * the reported problems (NetBSD PR/11644) but still fails for the 7357 * other (NetBSD PR/13151), although the latter may be due to other 7358 * issues too... 
7359 */ 7360 if (sc->sc_rev <= 0x12) { 7361 printf(" (disabled)"); 7362 sc->sc_dma_ok = 0; 7363 sc->sc_wdcdev.cap = 0; 7364 } else { 7365 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32; 7366 pciide_mapreg_dma(sc, pa); 7367 } 7368 7369 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE; 7370 sc->sc_wdcdev.PIO_cap = 4; 7371 if (sc->sc_dma_ok) { 7372 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7373 sc->sc_wdcdev.irqack = pciide_irqack; 7374 sc->sc_wdcdev.DMA_cap = 2; 7375 } 7376 sc->sc_wdcdev.set_modes = opti_setup_channel; 7377 7378 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7379 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 7380 7381 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 7382 OPTI_REG_INIT_CONTROL); 7383 7384 interface = PCI_INTERFACE(pa->pa_class); 7385 7386 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 7387 7388 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7389 cp = &sc->pciide_channels[channel]; 7390 if (pciide_chansetup(sc, channel, interface) == 0) 7391 continue; 7392 if (channel == 1 && 7393 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 7394 printf("%s: %s ignored (disabled)\n", 7395 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7396 continue; 7397 } 7398 pciide_map_compat_intr(pa, cp, channel, interface); 7399 if (cp->hw_ok == 0) 7400 continue; 7401 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7402 pciide_pci_intr); 7403 if (cp->hw_ok == 0) { 7404 pciide_unmap_compat_intr(pa, cp, channel, interface); 7405 continue; 7406 } 7407 opti_setup_channel(&cp->wdc_channel); 7408 } 7409 } 7410 7411 void 7412 opti_setup_channel(struct channel_softc *chp) 7413 { 7414 struct ata_drive_datas *drvp; 7415 struct pciide_channel *cp = (struct pciide_channel *)chp; 7416 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7417 int drive, spd; 7418 int mode[2]; 7419 u_int8_t rv, mr; 7420 7421 /* 7422 * The `Delay' and `Address Setup Time' fields of the 7423 * Miscellaneous Register are always zero initially. 7424 */ 7425 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 7426 mr &= ~(OPTI_MISC_DELAY_MASK | 7427 OPTI_MISC_ADDR_SETUP_MASK | 7428 OPTI_MISC_INDEX_MASK); 7429 7430 /* Prime the control register before setting timing values */ 7431 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 7432 7433 /* Determine the clockrate of the PCIbus the chip is attached to */ 7434 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 7435 spd &= OPTI_STRAP_PCI_SPEED_MASK; 7436 7437 /* setup DMA if needed */ 7438 pciide_channel_dma_setup(cp); 7439 7440 for (drive = 0; drive < 2; drive++) { 7441 drvp = &chp->ch_drive[drive]; 7442 /* If no drive, skip */ 7443 if ((drvp->drive_flags & DRIVE) == 0) { 7444 mode[drive] = -1; 7445 continue; 7446 } 7447 7448 if ((drvp->drive_flags & DRIVE_DMA)) { 7449 /* 7450 * Timings will be used for both PIO and DMA, 7451 * so adjust DMA mode if needed 7452 */ 7453 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 7454 drvp->PIO_mode = drvp->DMA_mode + 2; 7455 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 7456 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 7457 drvp->PIO_mode - 2 : 0; 7458 if (drvp->DMA_mode == 0) 7459 drvp->PIO_mode = 0; 7460 7461 mode[drive] = drvp->DMA_mode + 5; 7462 } else 7463 mode[drive] = drvp->PIO_mode; 7464 7465 if (drive && mode[0] >= 0 && 7466 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 7467 /* 7468 * Can't have two drives using different values 7469 * for `Address Setup Time'. 7470 * Slow down the faster drive to compensate. 
7471 */ 7472 int d = (opti_tim_as[spd][mode[0]] > 7473 opti_tim_as[spd][mode[1]]) ? 0 : 1; 7474 7475 mode[d] = mode[1-d]; 7476 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 7477 chp->ch_drive[d].DMA_mode = 0; 7478 chp->ch_drive[d].drive_flags &= DRIVE_DMA; 7479 } 7480 } 7481 7482 for (drive = 0; drive < 2; drive++) { 7483 int m; 7484 if ((m = mode[drive]) < 0) 7485 continue; 7486 7487 /* Set the Address Setup Time and select appropriate index */ 7488 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 7489 rv |= OPTI_MISC_INDEX(drive); 7490 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 7491 7492 /* Set the pulse width and recovery timing parameters */ 7493 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 7494 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 7495 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 7496 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 7497 7498 /* Set the Enhanced Mode register appropriately */ 7499 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 7500 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 7501 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 7502 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 7503 } 7504 7505 /* Finally, enable the timings */ 7506 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 7507 7508 pciide_print_modes(cp); 7509 } 7510 #endif 7511 7512 void 7513 serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7514 { 7515 struct pciide_channel *cp; 7516 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 7517 pcitag_t pcib_tag; 7518 int channel; 7519 bus_size_t cmdsize, ctlsize; 7520 7521 printf(": DMA"); 7522 pciide_mapreg_dma(sc, pa); 7523 printf("\n"); 7524 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7525 WDC_CAPABILITY_MODE; 7526 7527 if (sc->sc_dma_ok) { 7528 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7529 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7530 sc->sc_wdcdev.irqack = pciide_irqack; 7531 } 7532 sc->sc_wdcdev.PIO_cap = 4; 7533 sc->sc_wdcdev.DMA_cap = 2; 7534 switch (sc->sc_pp->ide_product) { 7535 case PCI_PRODUCT_RCC_OSB4_IDE: 7536 sc->sc_wdcdev.UDMA_cap = 2; 7537 break; 7538 case PCI_PRODUCT_RCC_CSB5_IDE: 7539 if (sc->sc_rev < 0x92) 7540 sc->sc_wdcdev.UDMA_cap = 4; 7541 else 7542 sc->sc_wdcdev.UDMA_cap = 5; 7543 break; 7544 case PCI_PRODUCT_RCC_CSB6_IDE: 7545 sc->sc_wdcdev.UDMA_cap = 4; 7546 break; 7547 case PCI_PRODUCT_RCC_CSB6_RAID_IDE: 7548 case PCI_PRODUCT_RCC_HT_1000_IDE: 7549 sc->sc_wdcdev.UDMA_cap = 5; 7550 break; 7551 } 7552 7553 sc->sc_wdcdev.set_modes = serverworks_setup_channel; 7554 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7555 sc->sc_wdcdev.nchannels = 7556 (sc->sc_pp->ide_product == PCI_PRODUCT_RCC_CSB6_IDE ? 
1 : 2); 7557 7558 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7559 cp = &sc->pciide_channels[channel]; 7560 if (pciide_chansetup(sc, channel, interface) == 0) 7561 continue; 7562 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7563 serverworks_pci_intr); 7564 if (cp->hw_ok == 0) 7565 return; 7566 pciide_map_compat_intr(pa, cp, channel, interface); 7567 if (cp->hw_ok == 0) 7568 return; 7569 serverworks_setup_channel(&cp->wdc_channel); 7570 } 7571 7572 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 7573 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 7574 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 7575 } 7576 7577 void 7578 serverworks_setup_channel(struct channel_softc *chp) 7579 { 7580 struct ata_drive_datas *drvp; 7581 struct pciide_channel *cp = (struct pciide_channel *)chp; 7582 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7583 int channel = chp->channel; 7584 int drive, unit; 7585 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 7586 u_int32_t idedma_ctl; 7587 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 7588 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 7589 7590 /* setup DMA if needed */ 7591 pciide_channel_dma_setup(cp); 7592 7593 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 7594 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 7595 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 7596 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 7597 7598 pio_time &= ~(0xffff << (16 * channel)); 7599 dma_time &= ~(0xffff << (16 * channel)); 7600 pio_mode &= ~(0xff << (8 * channel + 16)); 7601 udma_mode &= ~(0xff << (8 * channel + 16)); 7602 udma_mode &= ~(3 << (2 * channel)); 7603 7604 idedma_ctl = 0; 7605 7606 /* Per drive settings */ 7607 for (drive = 0; drive < 2; drive++) { 7608 drvp = &chp->ch_drive[drive]; 7609 /* If no drive, skip */ 7610 if ((drvp->drive_flags & DRIVE) == 0) 7611 continue; 7612 unit = drive + 2 * channel; 7613 /* add timing values, setup DMA if needed */ 7614 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 7615 pio_mode |= drvp->PIO_mode << (4 * unit + 16); 7616 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7617 (drvp->drive_flags & DRIVE_UDMA)) { 7618 /* use Ultra/DMA, check for 80-pin cable */ 7619 if (sc->sc_rev <= 0x92 && drvp->UDMA_mode > 2 && 7620 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 7621 PCI_SUBSYS_ID_REG)) & 7622 (1 << (14 + channel))) == 0) { 7623 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 7624 "cable not detected\n", drvp->drive_name, 7625 sc->sc_wdcdev.sc_dev.dv_xname, 7626 channel, drive), DEBUG_PROBE); 7627 drvp->UDMA_mode = 2; 7628 } 7629 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7630 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 7631 udma_mode |= 1 << unit; 7632 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7633 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7634 (drvp->drive_flags & DRIVE_DMA)) { 7635 /* use Multiword DMA */ 7636 drvp->drive_flags &= ~DRIVE_UDMA; 7637 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7638 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7639 } else { 7640 /* PIO only */ 7641 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7642 } 7643 } 7644 7645 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 7646 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 7647 if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE) 7648 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 7649 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 
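	/*
	 * Note (inferred from the masks and writes above): the config-space
	 * layout used here is 0x40 PIO timing, 0x44 MW-DMA timing, 0x48 PIO
	 * mode and 0x54 UDMA mode/enable; 0x48 is deliberately not written
	 * on the OSB4.
	 */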
7650 7651 if (idedma_ctl != 0) { 7652 /* Add software bits in status register */ 7653 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7654 IDEDMA_CTL(channel), idedma_ctl); 7655 } 7656 pciide_print_modes(cp); 7657 } 7658 7659 int 7660 serverworks_pci_intr(void *arg) 7661 { 7662 struct pciide_softc *sc = arg; 7663 struct pciide_channel *cp; 7664 struct channel_softc *wdc_cp; 7665 int rv = 0; 7666 int dmastat, i, crv; 7667 7668 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7669 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7670 IDEDMA_CTL(i)); 7671 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 7672 IDEDMA_CTL_INTR) 7673 continue; 7674 cp = &sc->pciide_channels[i]; 7675 wdc_cp = &cp->wdc_channel; 7676 crv = wdcintr(wdc_cp); 7677 if (crv == 0) { 7678 printf("%s:%d: bogus intr\n", 7679 sc->sc_wdcdev.sc_dev.dv_xname, i); 7680 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7681 IDEDMA_CTL(i), dmastat); 7682 } else 7683 rv = 1; 7684 } 7685 return (rv); 7686 } 7687 7688 void 7689 svwsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7690 { 7691 struct pciide_channel *cp; 7692 pci_intr_handle_t intrhandle; 7693 const char *intrstr; 7694 int channel; 7695 struct pciide_svwsata *ss; 7696 7697 /* Allocate memory for private data */ 7698 sc->sc_cookie = malloc(sizeof(*ss), M_DEVBUF, M_NOWAIT | M_ZERO); 7699 ss = sc->sc_cookie; 7700 7701 /* The 4-port version has a dummy second function. */ 7702 if (pci_conf_read(sc->sc_pc, sc->sc_tag, 7703 PCI_MAPREG_START + 0x14) == 0) { 7704 printf("\n"); 7705 return; 7706 } 7707 7708 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 7709 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 7710 &ss->ba5_st, &ss->ba5_sh, NULL, NULL, 0) != 0) { 7711 printf(": unable to map BA5 register space\n"); 7712 return; 7713 } 7714 7715 printf(": DMA"); 7716 svwsata_mapreg_dma(sc, pa); 7717 printf("\n"); 7718 7719 if (sc->sc_dma_ok) { 7720 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 7721 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7722 sc->sc_wdcdev.irqack = pciide_irqack; 7723 } 7724 sc->sc_wdcdev.PIO_cap = 4; 7725 sc->sc_wdcdev.DMA_cap = 2; 7726 sc->sc_wdcdev.UDMA_cap = 6; 7727 7728 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7729 sc->sc_wdcdev.nchannels = 4; 7730 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7731 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7732 sc->sc_wdcdev.set_modes = sata_setup_channel; 7733 7734 /* We can use SControl and SStatus to probe for drives. */ 7735 sc->sc_wdcdev.drv_probe = svwsata_drv_probe; 7736 7737 /* Map and establish the interrupt handler. */ 7738 if(pci_intr_map(pa, &intrhandle) != 0) { 7739 printf("%s: couldn't map native-PCI interrupt\n", 7740 sc->sc_wdcdev.sc_dev.dv_xname); 7741 return; 7742 } 7743 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 7744 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 7745 pciide_pci_intr, sc, sc->sc_wdcdev.sc_dev.dv_xname); 7746 if (sc->sc_pci_ih != NULL) { 7747 printf("%s: using %s for native-PCI interrupt\n", 7748 sc->sc_wdcdev.sc_dev.dv_xname, 7749 intrstr ? 
intrstr : "unknown interrupt"); 7750 } else { 7751 printf("%s: couldn't establish native-PCI interrupt", 7752 sc->sc_wdcdev.sc_dev.dv_xname); 7753 if (intrstr != NULL) 7754 printf(" at %s", intrstr); 7755 printf("\n"); 7756 return; 7757 } 7758 7759 switch (sc->sc_pp->ide_product) { 7760 case PCI_PRODUCT_RCC_K2_SATA: 7761 bus_space_write_4(ss->ba5_st, ss->ba5_sh, SVWSATA_SICR1, 7762 bus_space_read_4(ss->ba5_st, ss->ba5_sh, SVWSATA_SICR1) 7763 & ~0x00040000); 7764 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7765 SVWSATA_SIM, 0); 7766 break; 7767 } 7768 7769 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7770 cp = &sc->pciide_channels[channel]; 7771 if (pciide_chansetup(sc, channel, 0) == 0) 7772 continue; 7773 svwsata_mapchan(cp); 7774 sata_setup_channel(&cp->wdc_channel); 7775 } 7776 } 7777 7778 void 7779 svwsata_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 7780 { 7781 struct pciide_svwsata *ss = sc->sc_cookie; 7782 7783 sc->sc_wdcdev.dma_arg = sc; 7784 sc->sc_wdcdev.dma_init = pciide_dma_init; 7785 sc->sc_wdcdev.dma_start = pciide_dma_start; 7786 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 7787 7788 /* XXX */ 7789 sc->sc_dma_iot = ss->ba5_st; 7790 sc->sc_dma_ioh = ss->ba5_sh; 7791 7792 sc->sc_dmacmd_read = svwsata_dmacmd_read; 7793 sc->sc_dmacmd_write = svwsata_dmacmd_write; 7794 sc->sc_dmactl_read = svwsata_dmactl_read; 7795 sc->sc_dmactl_write = svwsata_dmactl_write; 7796 sc->sc_dmatbl_write = svwsata_dmatbl_write; 7797 7798 /* DMA registers all set up! */ 7799 sc->sc_dmat = pa->pa_dmat; 7800 sc->sc_dma_ok = 1; 7801 } 7802 7803 u_int8_t 7804 svwsata_dmacmd_read(struct pciide_softc *sc, int chan) 7805 { 7806 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7807 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0))); 7808 } 7809 7810 void 7811 svwsata_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 7812 { 7813 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7814 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0), val); 7815 } 7816 7817 u_int8_t 7818 svwsata_dmactl_read(struct pciide_softc *sc, int chan) 7819 { 7820 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7821 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0))); 7822 } 7823 7824 void 7825 svwsata_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 7826 { 7827 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7828 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0), val); 7829 } 7830 7831 void 7832 svwsata_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 7833 { 7834 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 7835 (chan << 8) + SVWSATA_DMA + IDEDMA_TBL(0), val); 7836 } 7837 7838 void 7839 svwsata_mapchan(struct pciide_channel *cp) 7840 { 7841 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7842 struct channel_softc *wdc_cp = &cp->wdc_channel; 7843 struct pciide_svwsata *ss = sc->sc_cookie; 7844 7845 cp->compat = 0; 7846 cp->ih = sc->sc_pci_ih; 7847 7848 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7849 (wdc_cp->channel << 8) + SVWSATA_TF0, 7850 SVWSATA_TF8 - SVWSATA_TF0, &wdc_cp->cmd_ioh) != 0) { 7851 printf("%s: couldn't map %s cmd regs\n", 7852 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7853 return; 7854 } 7855 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7856 (wdc_cp->channel << 8) + SVWSATA_TF8, 4, 7857 &wdc_cp->ctl_ioh) != 0) { 7858 printf("%s: couldn't map %s ctl regs\n", 7859 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7860 return; 7861 } 7862 wdc_cp->cmd_iot = wdc_cp->ctl_iot = ss->ba5_st; 7863 wdc_cp->_vtbl = &wdc_svwsata_vtbl; 7864 
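	/*
	 * WDCF_DMA_BEFORE_CMD makes wdc(4) start the bus-master DMA engine
	 * before issuing the command; the K2 SATA core apparently requires
	 * this ordering.
	 */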
wdc_cp->ch_flags |= WDCF_DMA_BEFORE_CMD; 7865 wdcattach(wdc_cp); 7866 } 7867 7868 void 7869 svwsata_drv_probe(struct channel_softc *chp) 7870 { 7871 struct pciide_channel *cp = (struct pciide_channel *)chp; 7872 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7873 struct pciide_svwsata *ss = sc->sc_cookie; 7874 int channel = chp->channel; 7875 uint32_t scontrol, sstatus; 7876 uint8_t scnt, sn, cl, ch; 7877 int s; 7878 7879 /* 7880 * Request communication initialization sequence, any speed. 7881 * Performing this is the equivalent of an ATA Reset. 7882 */ 7883 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 7884 7885 /* 7886 * XXX We don't yet support SATA power management; disable all 7887 * power management state transitions. 7888 */ 7889 scontrol |= SControl_IPM_NONE; 7890 7891 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7892 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7893 delay(50 * 1000); 7894 scontrol &= ~SControl_DET_INIT; 7895 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7896 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7897 delay(50 * 1000); 7898 7899 sstatus = bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7900 (channel << 8) + SVWSATA_SSTATUS); 7901 #if 0 7902 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 7903 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 7904 bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7905 (channel << 8) + SVWSATA_SSTATUS)); 7906 #endif 7907 switch (sstatus & SStatus_DET_mask) { 7908 case SStatus_DET_NODEV: 7909 /* No device; be silent. */ 7910 break; 7911 7912 case SStatus_DET_DEV_NE: 7913 printf("%s: port %d: device connected, but " 7914 "communication not established\n", 7915 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7916 break; 7917 7918 case SStatus_DET_OFFLINE: 7919 printf("%s: port %d: PHY offline\n", 7920 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7921 break; 7922 7923 case SStatus_DET_DEV: 7924 /* 7925 * XXX ATAPI detection doesn't currently work. Don't 7926 * XXX know why. But, it's not like the standard method 7927 * XXX can detect an ATAPI device connected via a SATA/PATA 7928 * XXX bridge, so at least this is no worse. --thorpej 7929 */ 7930 if (chp->_vtbl != NULL) 7931 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 7932 else 7933 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 7934 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 7935 delay(10); /* 400ns delay */ 7936 /* Save register contents. */ 7937 if (chp->_vtbl != NULL) { 7938 scnt = CHP_READ_REG(chp, wdr_seccnt); 7939 sn = CHP_READ_REG(chp, wdr_sector); 7940 cl = CHP_READ_REG(chp, wdr_cyl_lo); 7941 ch = CHP_READ_REG(chp, wdr_cyl_hi); 7942 } else { 7943 scnt = bus_space_read_1(chp->cmd_iot, 7944 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 7945 sn = bus_space_read_1(chp->cmd_iot, 7946 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 7947 cl = bus_space_read_1(chp->cmd_iot, 7948 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 7949 ch = bus_space_read_1(chp->cmd_iot, 7950 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 7951 } 7952 #if 0 7953 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7954 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7955 scnt, sn, cl, ch); 7956 #endif 7957 /* 7958 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7959 * cases we get wrong values here, so ignore it. 
7960 */ 7961 s = splbio(); 7962 if (cl == 0x14 && ch == 0xeb) 7963 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7964 else 7965 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7966 splx(s); 7967 7968 printf("%s: port %d: device present", 7969 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7970 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7971 case 1: 7972 printf(", speed: 1.5Gb/s"); 7973 break; 7974 case 2: 7975 printf(", speed: 3.0Gb/s"); 7976 break; 7977 } 7978 printf("\n"); 7979 break; 7980 7981 default: 7982 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7983 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7984 } 7985 } 7986 7987 u_int8_t 7988 svwsata_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7989 { 7990 if (reg & _WDC_AUX) { 7991 return (bus_space_read_4(chp->ctl_iot, chp->ctl_ioh, 7992 (reg & _WDC_REGMASK) << 2)); 7993 } else { 7994 return (bus_space_read_4(chp->cmd_iot, chp->cmd_ioh, 7995 (reg & _WDC_REGMASK) << 2)); 7996 } 7997 } 7998 7999 void 8000 svwsata_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 8001 { 8002 if (reg & _WDC_AUX) { 8003 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 8004 (reg & _WDC_REGMASK) << 2, val); 8005 } else { 8006 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 8007 (reg & _WDC_REGMASK) << 2, val); 8008 } 8009 } 8010 8011 void 8012 svwsata_lba48_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int16_t val) 8013 { 8014 if (reg & _WDC_AUX) { 8015 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 8016 (reg & _WDC_REGMASK) << 2, val); 8017 } else { 8018 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 8019 (reg & _WDC_REGMASK) << 2, val); 8020 } 8021 } 8022 8023 #define ACARD_IS_850(sc) \ 8024 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 8025 8026 void 8027 acard_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8028 { 8029 struct pciide_channel *cp; 8030 int i; 8031 pcireg_t interface; 8032 bus_size_t cmdsize, ctlsize; 8033 8034 /* 8035 * when the chip is in native mode it identifies itself as a 8036 * 'misc mass storage'. Fake interface in this case. 
8037 */ 8038 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 8039 interface = PCI_INTERFACE(pa->pa_class); 8040 } else { 8041 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8042 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8043 } 8044 8045 printf(": DMA"); 8046 pciide_mapreg_dma(sc, pa); 8047 printf("\n"); 8048 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8049 WDC_CAPABILITY_MODE; 8050 8051 if (sc->sc_dma_ok) { 8052 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8053 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8054 sc->sc_wdcdev.irqack = pciide_irqack; 8055 } 8056 sc->sc_wdcdev.PIO_cap = 4; 8057 sc->sc_wdcdev.DMA_cap = 2; 8058 switch (sc->sc_pp->ide_product) { 8059 case PCI_PRODUCT_ACARD_ATP850U: 8060 sc->sc_wdcdev.UDMA_cap = 2; 8061 break; 8062 case PCI_PRODUCT_ACARD_ATP860: 8063 case PCI_PRODUCT_ACARD_ATP860A: 8064 sc->sc_wdcdev.UDMA_cap = 4; 8065 break; 8066 case PCI_PRODUCT_ACARD_ATP865A: 8067 case PCI_PRODUCT_ACARD_ATP865R: 8068 sc->sc_wdcdev.UDMA_cap = 6; 8069 break; 8070 } 8071 8072 sc->sc_wdcdev.set_modes = acard_setup_channel; 8073 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8074 sc->sc_wdcdev.nchannels = 2; 8075 8076 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 8077 cp = &sc->pciide_channels[i]; 8078 if (pciide_chansetup(sc, i, interface) == 0) 8079 continue; 8080 if (interface & PCIIDE_INTERFACE_PCI(i)) { 8081 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 8082 &ctlsize, pciide_pci_intr); 8083 } else { 8084 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 8085 &cmdsize, &ctlsize); 8086 } 8087 if (cp->hw_ok == 0) 8088 return; 8089 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 8090 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 8091 wdcattach(&cp->wdc_channel); 8092 acard_setup_channel(&cp->wdc_channel); 8093 } 8094 if (!ACARD_IS_850(sc)) { 8095 u_int32_t reg; 8096 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 8097 reg &= ~ATP860_CTRL_INT; 8098 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 8099 } 8100 } 8101 8102 void 8103 acard_setup_channel(struct channel_softc *chp) 8104 { 8105 struct ata_drive_datas *drvp; 8106 struct pciide_channel *cp = (struct pciide_channel *)chp; 8107 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8108 int channel = chp->channel; 8109 int drive; 8110 u_int32_t idetime, udma_mode; 8111 u_int32_t idedma_ctl; 8112 8113 /* setup DMA if needed */ 8114 pciide_channel_dma_setup(cp); 8115 8116 if (ACARD_IS_850(sc)) { 8117 idetime = 0; 8118 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 8119 udma_mode &= ~ATP850_UDMA_MASK(channel); 8120 } else { 8121 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 8122 idetime &= ~ATP860_SETTIME_MASK(channel); 8123 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 8124 udma_mode &= ~ATP860_UDMA_MASK(channel); 8125 } 8126 8127 idedma_ctl = 0; 8128 8129 /* Per drive settings */ 8130 for (drive = 0; drive < 2; drive++) { 8131 drvp = &chp->ch_drive[drive]; 8132 /* If no drive, skip */ 8133 if ((drvp->drive_flags & DRIVE) == 0) 8134 continue; 8135 /* add timing values, setup DMA if needed */ 8136 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 8137 (drvp->drive_flags & DRIVE_UDMA)) { 8138 /* use Ultra/DMA */ 8139 if (ACARD_IS_850(sc)) { 8140 idetime |= ATP850_SETTIME(drive, 8141 acard_act_udma[drvp->UDMA_mode], 8142 acard_rec_udma[drvp->UDMA_mode]); 8143 udma_mode |= ATP850_UDMA_MODE(channel, drive, 8144 acard_udma_conf[drvp->UDMA_mode]); 8145 } else { 8146 idetime |= 
ATP860_SETTIME(channel, drive, 8147 acard_act_udma[drvp->UDMA_mode], 8148 acard_rec_udma[drvp->UDMA_mode]); 8149 udma_mode |= ATP860_UDMA_MODE(channel, drive, 8150 acard_udma_conf[drvp->UDMA_mode]); 8151 } 8152 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8153 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 8154 (drvp->drive_flags & DRIVE_DMA)) { 8155 /* use Multiword DMA */ 8156 drvp->drive_flags &= ~DRIVE_UDMA; 8157 if (ACARD_IS_850(sc)) { 8158 idetime |= ATP850_SETTIME(drive, 8159 acard_act_dma[drvp->DMA_mode], 8160 acard_rec_dma[drvp->DMA_mode]); 8161 } else { 8162 idetime |= ATP860_SETTIME(channel, drive, 8163 acard_act_dma[drvp->DMA_mode], 8164 acard_rec_dma[drvp->DMA_mode]); 8165 } 8166 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8167 } else { 8168 /* PIO only */ 8169 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 8170 if (ACARD_IS_850(sc)) { 8171 idetime |= ATP850_SETTIME(drive, 8172 acard_act_pio[drvp->PIO_mode], 8173 acard_rec_pio[drvp->PIO_mode]); 8174 } else { 8175 idetime |= ATP860_SETTIME(channel, drive, 8176 acard_act_pio[drvp->PIO_mode], 8177 acard_rec_pio[drvp->PIO_mode]); 8178 } 8179 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 8180 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 8181 | ATP8x0_CTRL_EN(channel)); 8182 } 8183 } 8184 8185 if (idedma_ctl != 0) { 8186 /* Add software bits in status register */ 8187 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8188 IDEDMA_CTL(channel), idedma_ctl); 8189 } 8190 pciide_print_modes(cp); 8191 8192 if (ACARD_IS_850(sc)) { 8193 pci_conf_write(sc->sc_pc, sc->sc_tag, 8194 ATP850_IDETIME(channel), idetime); 8195 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 8196 } else { 8197 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 8198 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 8199 } 8200 } 8201 8202 void 8203 nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8204 { 8205 struct pciide_channel *cp; 8206 int channel; 8207 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8208 bus_size_t cmdsize, ctlsize; 8209 u_int32_t conf; 8210 8211 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8212 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8213 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8214 8215 printf(": DMA"); 8216 pciide_mapreg_dma(sc, pa); 8217 8218 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8219 WDC_CAPABILITY_MODE; 8220 if (sc->sc_dma_ok) { 8221 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8222 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8223 sc->sc_wdcdev.irqack = pciide_irqack; 8224 } 8225 sc->sc_wdcdev.PIO_cap = 4; 8226 sc->sc_wdcdev.DMA_cap = 2; 8227 switch (sc->sc_pp->ide_product) { 8228 case PCI_PRODUCT_NVIDIA_NFORCE_IDE: 8229 sc->sc_wdcdev.UDMA_cap = 5; 8230 break; 8231 default: 8232 sc->sc_wdcdev.UDMA_cap = 6; 8233 } 8234 sc->sc_wdcdev.set_modes = nforce_setup_channel; 8235 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8236 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8237 8238 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8239 8240 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8241 cp = &sc->pciide_channels[channel]; 8242 8243 if (pciide_chansetup(sc, channel, interface) == 0) 8244 continue; 8245 8246 if ((conf & NFORCE_CHAN_EN(channel)) == 0) { 8247 printf("%s: %s ignored (disabled)\n", 8248 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8249 continue; 8250 } 8251 8252 pciide_map_compat_intr(pa, cp, channel, interface); 8253 if (cp->hw_ok == 0) 8254 continue; 8255 
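		/*
		 * pciide_mapchan() selects native or compatibility register
		 * mappings for this channel based on the interface bits.
		 */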
pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8256 nforce_pci_intr); 8257 if (cp->hw_ok == 0) { 8258 pciide_unmap_compat_intr(pa, cp, channel, interface); 8259 continue; 8260 } 8261 8262 if (pciide_chan_candisable(cp)) { 8263 conf &= ~NFORCE_CHAN_EN(channel); 8264 pciide_unmap_compat_intr(pa, cp, channel, interface); 8265 continue; 8266 } 8267 8268 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8269 } 8270 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8271 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8272 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 8273 } 8274 8275 void 8276 nforce_setup_channel(struct channel_softc *chp) 8277 { 8278 struct ata_drive_datas *drvp; 8279 int drive, mode; 8280 u_int32_t idedma_ctl; 8281 struct pciide_channel *cp = (struct pciide_channel *)chp; 8282 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8283 int channel = chp->channel; 8284 u_int32_t conf, piodmatim, piotim, udmatim; 8285 8286 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8287 piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM); 8288 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM); 8289 udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM); 8290 WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, " 8291 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8292 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8293 8294 /* Setup DMA if needed */ 8295 pciide_channel_dma_setup(cp); 8296 8297 /* Clear all bits for this channel */ 8298 idedma_ctl = 0; 8299 piodmatim &= ~NFORCE_PIODMATIM_MASK(channel); 8300 udmatim &= ~NFORCE_UDMATIM_MASK(channel); 8301 8302 /* Per channel settings */ 8303 for (drive = 0; drive < 2; drive++) { 8304 drvp = &chp->ch_drive[drive]; 8305 8306 /* If no drive, skip */ 8307 if ((drvp->drive_flags & DRIVE) == 0) 8308 continue; 8309 8310 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8311 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8312 /* Setup UltraDMA mode */ 8313 drvp->drive_flags &= ~DRIVE_DMA; 8314 8315 udmatim |= NFORCE_UDMATIM_SET(channel, drive, 8316 nforce_udma[drvp->UDMA_mode]) | 8317 NFORCE_UDMA_EN(channel, drive) | 8318 NFORCE_UDMA_ENM(channel, drive); 8319 8320 mode = drvp->PIO_mode; 8321 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8322 (drvp->drive_flags & DRIVE_DMA) != 0) { 8323 /* Setup multiword DMA mode */ 8324 drvp->drive_flags &= ~DRIVE_UDMA; 8325 8326 /* mode = min(pio, dma + 2) */ 8327 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8328 mode = drvp->PIO_mode; 8329 else 8330 mode = drvp->DMA_mode + 2; 8331 } else { 8332 mode = drvp->PIO_mode; 8333 goto pio; 8334 } 8335 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8336 8337 pio: 8338 /* Setup PIO mode */ 8339 if (mode <= 2) { 8340 drvp->DMA_mode = 0; 8341 drvp->PIO_mode = 0; 8342 mode = 0; 8343 } else { 8344 drvp->PIO_mode = mode; 8345 drvp->DMA_mode = mode - 2; 8346 } 8347 piodmatim |= NFORCE_PIODMATIM_SET(channel, drive, 8348 nforce_pio[mode]); 8349 } 8350 8351 if (idedma_ctl != 0) { 8352 /* Add software bits in status register */ 8353 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8354 IDEDMA_CTL(channel), idedma_ctl); 8355 } 8356 8357 WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, " 8358 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8359 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8360 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim); 8361 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim); 8362 8363 
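	/*
	 * NFORCE_PIOTIM is only read for the debug printouts above and is
	 * left unmodified here.
	 */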
pciide_print_modes(cp); 8364 } 8365 8366 int 8367 nforce_pci_intr(void *arg) 8368 { 8369 struct pciide_softc *sc = arg; 8370 struct pciide_channel *cp; 8371 struct channel_softc *wdc_cp; 8372 int i, rv, crv; 8373 u_int32_t dmastat; 8374 8375 rv = 0; 8376 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 8377 cp = &sc->pciide_channels[i]; 8378 wdc_cp = &cp->wdc_channel; 8379 8380 /* Skip compat channel */ 8381 if (cp->compat) 8382 continue; 8383 8384 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8385 IDEDMA_CTL(i)); 8386 if ((dmastat & IDEDMA_CTL_INTR) == 0) 8387 continue; 8388 8389 crv = wdcintr(wdc_cp); 8390 if (crv == 0) 8391 printf("%s:%d: bogus intr\n", 8392 sc->sc_wdcdev.sc_dev.dv_xname, i); 8393 else 8394 rv = 1; 8395 } 8396 return (rv); 8397 } 8398 8399 void 8400 artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8401 { 8402 struct pciide_channel *cp; 8403 bus_size_t cmdsize, ctlsize; 8404 pcireg_t interface; 8405 int channel; 8406 8407 printf(": DMA"); 8408 #ifdef PCIIDE_I31244_DISABLEDMA 8409 if (sc->sc_rev == 0) { 8410 printf(" disabled due to rev. 0"); 8411 sc->sc_dma_ok = 0; 8412 } else 8413 #endif 8414 pciide_mapreg_dma(sc, pa); 8415 printf("\n"); 8416 8417 /* 8418 * XXX Configure LEDs to show activity. 8419 */ 8420 8421 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8422 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 8423 sc->sc_wdcdev.PIO_cap = 4; 8424 if (sc->sc_dma_ok) { 8425 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8426 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8427 sc->sc_wdcdev.irqack = pciide_irqack; 8428 sc->sc_wdcdev.DMA_cap = 2; 8429 sc->sc_wdcdev.UDMA_cap = 6; 8430 } 8431 sc->sc_wdcdev.set_modes = sata_setup_channel; 8432 8433 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8434 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8435 8436 interface = PCI_INTERFACE(pa->pa_class); 8437 8438 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8439 cp = &sc->pciide_channels[channel]; 8440 if (pciide_chansetup(sc, channel, interface) == 0) 8441 continue; 8442 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8443 pciide_pci_intr); 8444 if (cp->hw_ok == 0) 8445 continue; 8446 pciide_map_compat_intr(pa, cp, channel, interface); 8447 sata_setup_channel(&cp->wdc_channel); 8448 } 8449 } 8450 8451 void 8452 ite_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8453 { 8454 struct pciide_channel *cp; 8455 int channel; 8456 pcireg_t interface; 8457 bus_size_t cmdsize, ctlsize; 8458 pcireg_t cfg, modectl; 8459 8460 /* 8461 * Fake interface since IT8212F is claimed to be a ``RAID'' device. 
8462 */ 8463 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8464 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8465 8466 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8467 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8468 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8469 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8470 modectl & IT_MODE_MASK), DEBUG_PROBE); 8471 8472 printf(": DMA"); 8473 pciide_mapreg_dma(sc, pa); 8474 8475 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8476 WDC_CAPABILITY_MODE; 8477 if (sc->sc_dma_ok) { 8478 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8479 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8480 sc->sc_wdcdev.irqack = pciide_irqack; 8481 } 8482 sc->sc_wdcdev.PIO_cap = 4; 8483 sc->sc_wdcdev.DMA_cap = 2; 8484 sc->sc_wdcdev.UDMA_cap = 6; 8485 8486 sc->sc_wdcdev.set_modes = ite_setup_channel; 8487 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8488 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8489 8490 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8491 8492 /* Disable RAID */ 8493 modectl &= ~IT_MODE_RAID1; 8494 /* Disable CPU firmware mode */ 8495 modectl &= ~IT_MODE_CPU; 8496 8497 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8498 8499 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8500 cp = &sc->pciide_channels[channel]; 8501 8502 if (pciide_chansetup(sc, channel, interface) == 0) 8503 continue; 8504 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8505 pciide_pci_intr); 8506 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8507 } 8508 8509 /* Re-read configuration registers after channels setup */ 8510 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8511 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8512 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8513 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8514 modectl & IT_MODE_MASK), DEBUG_PROBE); 8515 } 8516 8517 void 8518 ite_setup_channel(struct channel_softc *chp) 8519 { 8520 struct ata_drive_datas *drvp; 8521 int drive, mode; 8522 u_int32_t idedma_ctl; 8523 struct pciide_channel *cp = (struct pciide_channel *)chp; 8524 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8525 int channel = chp->channel; 8526 pcireg_t cfg, modectl; 8527 pcireg_t tim; 8528 8529 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8530 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8531 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_TIM(channel)); 8532 WDCDEBUG_PRINT(("%s:%d: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8533 channel, tim), DEBUG_PROBE); 8534 8535 /* Setup DMA if needed */ 8536 pciide_channel_dma_setup(cp); 8537 8538 /* Clear all bits for this channel */ 8539 idedma_ctl = 0; 8540 8541 /* Per channel settings */ 8542 for (drive = 0; drive < 2; drive++) { 8543 drvp = &chp->ch_drive[drive]; 8544 8545 /* If no drive, skip */ 8546 if ((drvp->drive_flags & DRIVE) == 0) 8547 continue; 8548 8549 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8550 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8551 /* Setup UltraDMA mode */ 8552 drvp->drive_flags &= ~DRIVE_DMA; 8553 modectl &= ~IT_MODE_DMA(channel, drive); 8554 8555 #if 0 8556 /* Check cable, works only in CPU firmware mode */ 8557 if (drvp->UDMA_mode > 2 && 8558 (cfg & IT_CFG_CABLE(channel, drive)) == 0) { 8559 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 8560 "80-wire cable not detected\n", 8561 drvp->drive_name, 8562 sc->sc_wdcdev.sc_dev.dv_xname, 8563 channel, drive), DEBUG_PROBE); 8564 drvp->UDMA_mode = 2; 8565 } 8566 #endif 8567 
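			/*
			 * IT_TIM_UDMA5 presumably selects the faster UDMA
			 * clocking required for modes 5 and 6; it is cleared
			 * for the slower modes.
			 */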
8568 if (drvp->UDMA_mode >= 5) 8569 tim |= IT_TIM_UDMA5(drive); 8570 else 8571 tim &= ~IT_TIM_UDMA5(drive); 8572 8573 mode = drvp->PIO_mode; 8574 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8575 (drvp->drive_flags & DRIVE_DMA) != 0) { 8576 /* Setup multiword DMA mode */ 8577 drvp->drive_flags &= ~DRIVE_UDMA; 8578 modectl |= IT_MODE_DMA(channel, drive); 8579 8580 /* mode = min(pio, dma + 2) */ 8581 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8582 mode = drvp->PIO_mode; 8583 else 8584 mode = drvp->DMA_mode + 2; 8585 } else { 8586 mode = drvp->PIO_mode; 8587 goto pio; 8588 } 8589 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8590 8591 pio: 8592 /* Setup PIO mode */ 8593 if (mode <= 2) { 8594 drvp->DMA_mode = 0; 8595 drvp->PIO_mode = 0; 8596 mode = 0; 8597 } else { 8598 drvp->PIO_mode = mode; 8599 drvp->DMA_mode = mode - 2; 8600 } 8601 8602 /* Enable IORDY if PIO mode >= 3 */ 8603 if (drvp->PIO_mode >= 3) 8604 cfg |= IT_CFG_IORDY(channel); 8605 } 8606 8607 WDCDEBUG_PRINT(("%s: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8608 tim), DEBUG_PROBE); 8609 8610 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_CFG, cfg); 8611 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8612 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_TIM(channel), tim); 8613 8614 if (idedma_ctl != 0) { 8615 /* Add software bits in status register */ 8616 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8617 IDEDMA_CTL(channel), idedma_ctl); 8618 } 8619 8620 pciide_print_modes(cp); 8621 } 8622 8623 void 8624 ixp_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8625 { 8626 struct pciide_channel *cp; 8627 int channel; 8628 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8629 bus_size_t cmdsize, ctlsize; 8630 8631 printf(": DMA"); 8632 pciide_mapreg_dma(sc, pa); 8633 8634 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8635 WDC_CAPABILITY_MODE; 8636 if (sc->sc_dma_ok) { 8637 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8638 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8639 sc->sc_wdcdev.irqack = pciide_irqack; 8640 } 8641 sc->sc_wdcdev.PIO_cap = 4; 8642 sc->sc_wdcdev.DMA_cap = 2; 8643 sc->sc_wdcdev.UDMA_cap = 6; 8644 8645 sc->sc_wdcdev.set_modes = ixp_setup_channel; 8646 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8647 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8648 8649 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8650 8651 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8652 cp = &sc->pciide_channels[channel]; 8653 if (pciide_chansetup(sc, channel, interface) == 0) 8654 continue; 8655 pciide_map_compat_intr(pa, cp, channel, interface); 8656 if (cp->hw_ok == 0) 8657 continue; 8658 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8659 pciide_pci_intr); 8660 if (cp->hw_ok == 0) { 8661 pciide_unmap_compat_intr(pa, cp, channel, interface); 8662 continue; 8663 } 8664 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8665 } 8666 } 8667 8668 void 8669 ixp_setup_channel(struct channel_softc *chp) 8670 { 8671 struct ata_drive_datas *drvp; 8672 int drive, mode; 8673 u_int32_t idedma_ctl; 8674 struct pciide_channel *cp = (struct pciide_channel*)chp; 8675 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8676 int channel = chp->channel; 8677 pcireg_t udma, mdma_timing, pio, pio_timing; 8678 8679 pio_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING); 8680 pio = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL); 8681 mdma_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING); 8682 udma = pci_conf_read(sc->sc_pc, 
sc->sc_tag, IXP_UDMA_CTL); 8683 8684 /* Setup DMA if needed */ 8685 pciide_channel_dma_setup(cp); 8686 8687 idedma_ctl = 0; 8688 8689 /* Per channel settings */ 8690 for (drive = 0; drive < 2; drive++) { 8691 drvp = &chp->ch_drive[drive]; 8692 8693 /* If no drive, skip */ 8694 if ((drvp->drive_flags & DRIVE) == 0) 8695 continue; 8696 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8697 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8698 /* Setup UltraDMA mode */ 8699 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8700 IXP_UDMA_ENABLE(udma, chp->channel, drive); 8701 IXP_SET_MODE(udma, chp->channel, drive, 8702 drvp->UDMA_mode); 8703 mode = drvp->PIO_mode; 8704 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8705 (drvp->drive_flags & DRIVE_DMA) != 0) { 8706 /* Setup multiword DMA mode */ 8707 drvp->drive_flags &= ~DRIVE_UDMA; 8708 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8709 IXP_UDMA_DISABLE(udma, chp->channel, drive); 8710 IXP_SET_TIMING(mdma_timing, chp->channel, drive, 8711 ixp_mdma_timings[drvp->DMA_mode]); 8712 8713 /* mode = min(pio, dma + 2) */ 8714 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8715 mode = drvp->PIO_mode; 8716 else 8717 mode = drvp->DMA_mode + 2; 8718 } else { 8719 mode = drvp->PIO_mode; 8720 } 8721 8722 /* Setup PIO mode */ 8723 drvp->PIO_mode = mode; 8724 if (mode < 2) 8725 drvp->DMA_mode = 0; 8726 else 8727 drvp->DMA_mode = mode - 2; 8728 /* 8729 * Set PIO mode and timings 8730 * Linux driver avoids PIO mode 1, let's do it too. 8731 */ 8732 if (drvp->PIO_mode == 1) 8733 drvp->PIO_mode = 0; 8734 8735 IXP_SET_MODE(pio, chp->channel, drive, drvp->PIO_mode); 8736 IXP_SET_TIMING(pio_timing, chp->channel, drive, 8737 ixp_pio_timings[drvp->PIO_mode]); 8738 } 8739 8740 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL, udma); 8741 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING, mdma_timing); 8742 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL, pio); 8743 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING, pio_timing); 8744 8745 if (idedma_ctl != 0) { 8746 /* Add software bits in status register */ 8747 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8748 IDEDMA_CTL(channel), idedma_ctl); 8749 } 8750 8751 pciide_print_modes(cp); 8752 } 8753 8754 void 8755 jmicron_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8756 { 8757 struct pciide_channel *cp; 8758 int channel; 8759 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8760 bus_size_t cmdsize, ctlsize; 8761 u_int32_t conf; 8762 8763 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8764 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8765 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8766 8767 printf(": DMA"); 8768 pciide_mapreg_dma(sc, pa); 8769 8770 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8771 WDC_CAPABILITY_MODE; 8772 if (sc->sc_dma_ok) { 8773 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8774 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8775 sc->sc_wdcdev.irqack = pciide_irqack; 8776 } 8777 sc->sc_wdcdev.PIO_cap = 4; 8778 sc->sc_wdcdev.DMA_cap = 2; 8779 sc->sc_wdcdev.UDMA_cap = 6; 8780 sc->sc_wdcdev.set_modes = jmicron_setup_channel; 8781 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8782 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8783 8784 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8785 8786 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8787 cp = &sc->pciide_channels[channel]; 8788 8789 if (pciide_chansetup(sc, channel, interface) == 0) 8790 continue; 8791 8792 #if 0 8793 if ((conf & 
JMICRON_CHAN_EN(channel)) == 0) { 8794 printf("%s: %s ignored (disabled)\n", 8795 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8796 continue; 8797 } 8798 #endif 8799 8800 pciide_map_compat_intr(pa, cp, channel, interface); 8801 if (cp->hw_ok == 0) 8802 continue; 8803 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8804 pciide_pci_intr); 8805 if (cp->hw_ok == 0) { 8806 pciide_unmap_compat_intr(pa, cp, channel, interface); 8807 continue; 8808 } 8809 8810 if (pciide_chan_candisable(cp)) { 8811 conf &= ~JMICRON_CHAN_EN(channel); 8812 pciide_unmap_compat_intr(pa, cp, channel, interface); 8813 continue; 8814 } 8815 8816 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8817 } 8818 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8819 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8820 pci_conf_write(sc->sc_pc, sc->sc_tag, JMICRON_CONF, conf); 8821 } 8822 8823 void 8824 jmicron_setup_channel(struct channel_softc *chp) 8825 { 8826 struct ata_drive_datas *drvp; 8827 int drive, mode; 8828 u_int32_t idedma_ctl; 8829 struct pciide_channel *cp = (struct pciide_channel *)chp; 8830 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8831 int channel = chp->channel; 8832 u_int32_t conf; 8833 8834 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8835 8836 /* Setup DMA if needed */ 8837 pciide_channel_dma_setup(cp); 8838 8839 /* Clear all bits for this channel */ 8840 idedma_ctl = 0; 8841 8842 /* Per channel settings */ 8843 for (drive = 0; drive < 2; drive++) { 8844 drvp = &chp->ch_drive[drive]; 8845 8846 /* If no drive, skip */ 8847 if ((drvp->drive_flags & DRIVE) == 0) 8848 continue; 8849 8850 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8851 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8852 /* Setup UltraDMA mode */ 8853 drvp->drive_flags &= ~DRIVE_DMA; 8854 8855 /* see if cable is up to scratch */ 8856 if ((conf & JMICRON_CONF_40PIN) && 8857 (drvp->UDMA_mode > 2)) 8858 drvp->UDMA_mode = 2; 8859 8860 mode = drvp->PIO_mode; 8861 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8862 (drvp->drive_flags & DRIVE_DMA) != 0) { 8863 /* Setup multiword DMA mode */ 8864 drvp->drive_flags &= ~DRIVE_UDMA; 8865 8866 /* mode = min(pio, dma + 2) */ 8867 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8868 mode = drvp->PIO_mode; 8869 else 8870 mode = drvp->DMA_mode + 2; 8871 } else { 8872 mode = drvp->PIO_mode; 8873 goto pio; 8874 } 8875 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8876 8877 pio: 8878 /* Setup PIO mode */ 8879 if (mode <= 2) { 8880 drvp->DMA_mode = 0; 8881 drvp->PIO_mode = 0; 8882 } else { 8883 drvp->PIO_mode = mode; 8884 drvp->DMA_mode = mode - 2; 8885 } 8886 } 8887 8888 if (idedma_ctl != 0) { 8889 /* Add software bits in status register */ 8890 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8891 IDEDMA_CTL(channel), idedma_ctl); 8892 } 8893 8894 pciide_print_modes(cp); 8895 } 8896 8897 void 8898 phison_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8899 { 8900 struct pciide_channel *cp; 8901 int channel; 8902 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8903 bus_size_t cmdsize, ctlsize; 8904 8905 sc->chip_unmap = default_chip_unmap; 8906 8907 printf(": DMA"); 8908 pciide_mapreg_dma(sc, pa); 8909 8910 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8911 WDC_CAPABILITY_MODE; 8912 if (sc->sc_dma_ok) { 8913 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8914 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8915 sc->sc_wdcdev.irqack = pciide_irqack; 8916 } 8917 sc->sc_wdcdev.PIO_cap = 4; 8918 sc->sc_wdcdev.DMA_cap = 2; 8919 
sc->sc_wdcdev.UDMA_cap = 5; 8920 sc->sc_wdcdev.set_modes = phison_setup_channel; 8921 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8922 sc->sc_wdcdev.nchannels = 1; 8923 8924 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8925 8926 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8927 cp = &sc->pciide_channels[channel]; 8928 8929 if (pciide_chansetup(sc, channel, interface) == 0) 8930 continue; 8931 8932 pciide_map_compat_intr(pa, cp, channel, interface); 8933 if (cp->hw_ok == 0) 8934 continue; 8935 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8936 pciide_pci_intr); 8937 if (cp->hw_ok == 0) { 8938 pciide_unmap_compat_intr(pa, cp, channel, interface); 8939 continue; 8940 } 8941 8942 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8943 } 8944 } 8945 8946 void 8947 phison_setup_channel(struct channel_softc *chp) 8948 { 8949 struct ata_drive_datas *drvp; 8950 int drive, mode; 8951 u_int32_t idedma_ctl; 8952 struct pciide_channel *cp = (struct pciide_channel *)chp; 8953 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8954 int channel = chp->channel; 8955 8956 /* Setup DMA if needed */ 8957 pciide_channel_dma_setup(cp); 8958 8959 /* Clear all bits for this channel */ 8960 idedma_ctl = 0; 8961 8962 /* Per channel settings */ 8963 for (drive = 0; drive < 2; drive++) { 8964 drvp = &chp->ch_drive[drive]; 8965 8966 /* If no drive, skip */ 8967 if ((drvp->drive_flags & DRIVE) == 0) 8968 continue; 8969 8970 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8971 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8972 /* Setup UltraDMA mode */ 8973 drvp->drive_flags &= ~DRIVE_DMA; 8974 mode = drvp->PIO_mode; 8975 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8976 (drvp->drive_flags & DRIVE_DMA) != 0) { 8977 /* Setup multiword DMA mode */ 8978 drvp->drive_flags &= ~DRIVE_UDMA; 8979 8980 /* mode = min(pio, dma + 2) */ 8981 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8982 mode = drvp->PIO_mode; 8983 else 8984 mode = drvp->DMA_mode + 2; 8985 } else { 8986 mode = drvp->PIO_mode; 8987 goto pio; 8988 } 8989 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8990 8991 pio: 8992 /* Setup PIO mode */ 8993 if (mode <= 2) { 8994 drvp->DMA_mode = 0; 8995 drvp->PIO_mode = 0; 8996 } else { 8997 drvp->PIO_mode = mode; 8998 drvp->DMA_mode = mode - 2; 8999 } 9000 } 9001 9002 if (idedma_ctl != 0) { 9003 /* Add software bits in status register */ 9004 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 9005 IDEDMA_CTL(channel), idedma_ctl); 9006 } 9007 9008 pciide_print_modes(cp); 9009 } 9010 9011 void 9012 sch_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 9013 { 9014 struct pciide_channel *cp; 9015 int channel; 9016 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 9017 bus_size_t cmdsize, ctlsize; 9018 9019 printf(": DMA"); 9020 pciide_mapreg_dma(sc, pa); 9021 9022 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 9023 WDC_CAPABILITY_MODE; 9024 if (sc->sc_dma_ok) { 9025 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 9026 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 9027 sc->sc_wdcdev.irqack = pciide_irqack; 9028 } 9029 sc->sc_wdcdev.PIO_cap = 4; 9030 sc->sc_wdcdev.DMA_cap = 2; 9031 sc->sc_wdcdev.UDMA_cap = 5; 9032 sc->sc_wdcdev.set_modes = sch_setup_channel; 9033 sc->sc_wdcdev.channels = sc->wdc_chanarray; 9034 sc->sc_wdcdev.nchannels = 1; 9035 9036 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 9037 9038 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 9039 cp = &sc->pciide_channels[channel]; 9040 9041 
if (pciide_chansetup(sc, channel, interface) == 0) 9042 continue; 9043 9044 pciide_map_compat_intr(pa, cp, channel, interface); 9045 if (cp->hw_ok == 0) 9046 continue; 9047 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 9048 pciide_pci_intr); 9049 if (cp->hw_ok == 0) { 9050 pciide_unmap_compat_intr(pa, cp, channel, interface); 9051 continue; 9052 } 9053 9054 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 9055 } 9056 } 9057 9058 void 9059 sch_setup_channel(struct channel_softc *chp) 9060 { 9061 struct ata_drive_datas *drvp; 9062 int drive, mode; 9063 u_int32_t tim, timaddr; 9064 struct pciide_channel *cp = (struct pciide_channel *)chp; 9065 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 9066 9067 /* Setup DMA if needed */ 9068 pciide_channel_dma_setup(cp); 9069 9070 /* Per channel settings */ 9071 for (drive = 0; drive < 2; drive++) { 9072 drvp = &chp->ch_drive[drive]; 9073 9074 /* If no drive, skip */ 9075 if ((drvp->drive_flags & DRIVE) == 0) 9076 continue; 9077 9078 timaddr = (drive == 0) ? SCH_D0TIM : SCH_D1TIM; 9079 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, timaddr); 9080 tim &= ~SCH_TIM_MASK; 9081 9082 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 9083 (drvp->drive_flags & DRIVE_UDMA) != 0) { 9084 /* Setup UltraDMA mode */ 9085 drvp->drive_flags &= ~DRIVE_DMA; 9086 9087 mode = drvp->PIO_mode; 9088 tim |= (drvp->UDMA_mode << 16) | SCH_TIM_SYNCDMA; 9089 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 9090 (drvp->drive_flags & DRIVE_DMA) != 0) { 9091 /* Setup multiword DMA mode */ 9092 drvp->drive_flags &= ~DRIVE_UDMA; 9093 9094 tim &= ~SCH_TIM_SYNCDMA; 9095 9096 /* mode = min(pio, dma + 2) */ 9097 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 9098 mode = drvp->PIO_mode; 9099 else 9100 mode = drvp->DMA_mode + 2; 9101 } else { 9102 mode = drvp->PIO_mode; 9103 goto pio; 9104 } 9105 9106 pio: 9107 /* Setup PIO mode */ 9108 if (mode <= 2) { 9109 drvp->DMA_mode = 0; 9110 drvp->PIO_mode = 0; 9111 } else { 9112 drvp->PIO_mode = mode; 9113 drvp->DMA_mode = mode - 2; 9114 } 9115 tim |= (drvp->DMA_mode << 8) | (drvp->PIO_mode); 9116 pci_conf_write(sc->sc_pc, sc->sc_tag, timaddr, tim); 9117 } 9118 9119 pciide_print_modes(cp); 9120 } 9121 9122 void 9123 rdc_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 9124 { 9125 struct pciide_channel *cp; 9126 int channel; 9127 u_int32_t patr; 9128 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 9129 bus_size_t cmdsize, ctlsize; 9130 9131 printf(": DMA"); 9132 pciide_mapreg_dma(sc, pa); 9133 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 9134 if (sc->sc_dma_ok) { 9135 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 9136 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 9137 sc->sc_wdcdev.irqack = pciide_irqack; 9138 sc->sc_wdcdev.dma_init = pciide_dma_init; 9139 } 9140 sc->sc_wdcdev.PIO_cap = 4; 9141 sc->sc_wdcdev.DMA_cap = 2; 9142 sc->sc_wdcdev.UDMA_cap = 5; 9143 sc->sc_wdcdev.set_modes = rdc_setup_channel; 9144 sc->sc_wdcdev.channels = sc->wdc_chanarray; 9145 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 9146 9147 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 9148 9149 WDCDEBUG_PRINT(("rdc_chip_map: old PATR=0x%x, " 9150 "PSD1ATR=0x%x, UDCCR=0x%x, IIOCR=0x%x\n", 9151 pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR), 9152 pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR), 9153 pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR), 9154 pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR)), 9155 DEBUG_PROBE); 9156 9157 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 
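		/*
		 * Channels can be individually disabled via the PATR enable
		 * bits; such channels are skipped below.
		 */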
		cp = &sc->pciide_channels[channel];

		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		patr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR);
		if ((patr & RDCIDE_PATR_EN(channel)) == 0) {
			printf("%s: %s ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			goto next;
		if (pciide_chan_candisable(cp)) {
			patr &= ~RDCIDE_PATR_EN(channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_PATR,
			    patr);
		}
		if (cp->hw_ok == 0)
			goto next;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
next:
		if (cp->hw_ok == 0)
			pciide_unmap_compat_intr(pa, cp, channel, interface);
	}

	WDCDEBUG_PRINT(("rdc_chip_map: PATR=0x%x, "
	    "PSD1ATR=0x%x, UDCCR=0x%x, IIOCR=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR)),
	    DEBUG_PROBE);
}

void
rdc_setup_channel(struct channel_softc *chp)
{
	u_int8_t drive;
	u_int32_t patr, psd1atr, udccr, iiocr;
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	patr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PATR);
	psd1atr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR);
	udccr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR);
	iiocr = pci_conf_read(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/* clear modes */
	patr = patr & (RDCIDE_PATR_EN(0) | RDCIDE_PATR_EN(1));
	psd1atr &= ~RDCIDE_PSD1ATR_SETUP_MASK(chp->channel);
	psd1atr &= ~RDCIDE_PSD1ATR_HOLD_MASK(chp->channel);
	for (drive = 0; drive < 2; drive++) {
		udccr &= ~RDCIDE_UDCCR_EN(chp->channel, drive);
		udccr &= ~RDCIDE_UDCCR_TIM_MASK(chp->channel, drive);
		iiocr &= ~RDCIDE_IIOCR_CLK_MASK(chp->channel, drive);
	}
	/* now setup modes */
	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (drvp->drive_flags & DRIVE_ATAPI)
			patr |= RDCIDE_PATR_ATA(chp->channel, drive);
		if (drive == 0) {
			patr |= RDCIDE_PATR_SETUP(rdcide_setup[drvp->PIO_mode],
			    chp->channel);
			patr |= RDCIDE_PATR_HOLD(rdcide_hold[drvp->PIO_mode],
			    chp->channel);
		} else {
			patr |= RDCIDE_PATR_DEV1_TEN(chp->channel);
			psd1atr |= RDCIDE_PSD1ATR_SETUP(
			    rdcide_setup[drvp->PIO_mode],
			    chp->channel);
			psd1atr |= RDCIDE_PSD1ATR_HOLD(
			    rdcide_hold[drvp->PIO_mode],
			    chp->channel);
		}
		if (drvp->PIO_mode > 0) {
			patr |= RDCIDE_PATR_FTIM(chp->channel, drive);
			patr |= RDCIDE_PATR_IORDY(chp->channel, drive);
		}
		if (drvp->drive_flags & DRIVE_DMA)
			patr |= RDCIDE_PATR_DMAEN(chp->channel, drive);
		if ((drvp->drive_flags & DRIVE_UDMA) == 0)
			continue;

		if ((iiocr & RDCIDE_IIOCR_CABLE(chp->channel, drive)) == 0
		    && drvp->UDMA_mode > 2)
			drvp->UDMA_mode = 2;
		udccr |= RDCIDE_UDCCR_EN(chp->channel, drive);
		udccr |= RDCIDE_UDCCR_TIM(rdcide_udmatim[drvp->UDMA_mode],
		    chp->channel, drive);
		iiocr |= RDCIDE_IIOCR_CLK(rdcide_udmaclk[drvp->UDMA_mode],
		    chp->channel, drive);
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_PATR, patr);
	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_PSD1ATR, psd1atr);
	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_UDCCR, udccr);
	pci_conf_write(sc->sc_pc, sc->sc_tag, RDCIDE_IIOCR, iiocr);
}