/*	$NetBSD: cmdide.c,v 1.38 2012/09/03 15:38:17 kiyohara Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cmdide.c,v 1.38 2012/09/03 15:38:17 kiyohara Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_cmd_reg.h>


static int  cmdide_match(device_t, cfdata_t, void *);
static void cmdide_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(cmdide, sizeof(struct pciide_softc),
    cmdide_match, cmdide_attach, pciide_detach, NULL);

static void cmd_chip_map(struct pciide_softc*, const struct pci_attach_args*);
static void cmd0643_9_chip_map(struct pciide_softc*,
    const struct pci_attach_args*);
static void cmd0643_9_setup_channel(struct ata_channel*);
static void cmd_channel_map(const struct pci_attach_args *,
    struct pciide_softc *, int);
static int  cmd_pci_intr(void *);
static void cmd646_9_irqack(struct ata_channel *);
static void cmd680_chip_map(struct pciide_softc*,
    const struct pci_attach_args*);
static void cmd680_setup_channel(struct ata_channel*);
static void cmd680_channel_map(const struct pci_attach_args *,
    struct pciide_softc *, int);

static const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  0,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  0,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  0,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  0,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  0,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_680,
	  0,
	  "Silicon Image 0680",
	  cmd680_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

static int
cmdide_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_CMDTECH) {
		if (pciide_lookup_product(pa->pa_id, pciide_cmd_products))
			return (2);
	}
	return (0);
}

static void
cmdide_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct pciide_softc *sc = device_private(self);

	sc->sc_wdcdev.sc_atac.atac_dev = self;

	pciide_common_attach(sc, pa,
	    pciide_lookup_product(pa->pa_id, pciide_cmd_products));

}

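/*
 * Set up one channel of a CMD064x-family controller: pick native-PCI vs.
 * compatibility mode (faking the interface byte when the 0648/0649 has
 * been configured to identify itself as a RAID controller), share the
 * command queue between channels on chips that lack independent channel
 * operation, and ignore the second channel when it is disabled in the
 * CMD_CTRL register.
 */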
static void
cmd_channel_map(const struct pci_attach_args *pa, struct pciide_softc *sc,
    int channel)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
	int interface, one_channel;

	/*
	 * The 0648/0649 can be told to identify as a RAID controller.
	 * In this case, we have to fake the interface byte.
	 */
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCIIDE_INTERFACE_SETTABLE(0) |
		    PCIIDE_INTERFACE_SETTABLE(1);
		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
		    CMD_CONF_DSA1)
			interface |= PCIIDE_INTERFACE_PCI(0) |
			    PCIIDE_INTERFACE_PCI(1);
	} else {
		interface = PCI_INTERFACE(pa->pa_class);
	}

	sc->wdc_chanarray[channel] = &cp->ata_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->ata_channel.ch_channel = channel;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;

	/*
	 * Older CMD64X chips don't have independent channels.
	 */
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_CMDTECH_649:
		one_channel = 0;
		break;
	default:
		one_channel = 1;
		break;
	}

	if (channel > 0 && one_channel) {
		cp->ata_channel.ch_queue =
		    sc->pciide_channels[0].ata_channel.ch_queue;
	} else {
		cp->ata_channel.ch_queue =
		    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
	}
	if (cp->ata_channel.ch_queue == NULL) {
		aprint_error("%s %s channel: "
		    "can't allocate memory for command queue",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cp->name);
		return;
	}

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "%s channel %s to %s mode\n", cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");

	/*
	 * With a CMD PCI64x, if we get here, the first channel is enabled:
	 * there's no way to disable the first channel without disabling
	 * the whole device.
	 */
	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
		aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "%s channel ignored (disabled)\n", cp->name);
		cp->ata_channel.ch_flags |= ATACH_DISABLED;
		return;
	}

	pciide_mapchan(pa, cp, interface, cmd_pci_intr);
}

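/*
 * Interrupt handler shared by both channels: the chip latches per-channel
 * interrupt status in the CMD_CONF (primary) and CMD_ARTTIM23 (secondary)
 * configuration registers, so only dispatch to wdcintr() for channels
 * that actually have an interrupt pending; a bogus interrupt is
 * acknowledged anyway.
 */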
static int
cmd_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	int i, rv, crv;
	u_int32_t priirq, secirq;

	rv = 0;
	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
	for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->ata_channel;
		/* If a compat channel, skip it. */
		if (cp->compat)
			continue;
		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
			crv = wdcintr(wdc_cp);
			if (crv == 0) {
				aprint_error("%s:%d: bogus intr\n",
				    device_xname(
				      sc->sc_wdcdev.sc_atac.atac_dev), i);
				sc->sc_wdcdev.irqack(wdc_cp);
			} else
				rv = 1;
		}
	}
	return rv;
}

static void
cmd_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	int channel;

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and the base address registers can be disabled at
	 * the hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "hardware does not support DMA\n");
	sc->sc_dma_ok = 0;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cmd_channel_map(pa, sc, channel);
	}
}

static void
cmd0643_9_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	int channel;
	pcireg_t rev = PCI_REVISION(pa->pa_class);

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and the base address registers can be disabled at
	 * the hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_CMDTECH_649:
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_648:
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_646:
			if (rev >= CMD0646U2_REV) {
				sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
			} else if (rev >= CMD0646U_REV) {
				/*
				 * Linux's driver claims that the 646U is
				 * broken with UDMA. Only enable it if we
				 * know what we're doing.
				 */
#ifdef PCIIDE_CMD0646U_ENABLEUDMA
				sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
#endif
				/* explicitly disable UDMA */
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(0), 0);
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(1), 0);
			}
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		default:
			sc->sc_wdcdev.irqack = pciide_irqack;
		}
	}

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = cmd0643_9_setup_channel;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	ATADEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
	    DEBUG_PROBE);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++)
		cmd_channel_map(pa, sc, channel);

	/*
	 * Note: this also makes sure we clear the irq disable and reset
	 * bits.
	 */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
	ATADEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
	    DEBUG_PROBE);
}

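/*
 * Program the per-drive timing registers for a 0643/0646/0648/0649
 * channel: use the PIO timing table by default, switch to the multiword
 * DMA table (and clear the drive's UDMA enable bit) for DMA drives, and
 * program CMD_UDMATIM for UltraDMA drives, capping the mode at UDMA2
 * when no 80-wire cable is reported in CMD_BICSR.
 */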
static void
cmd0643_9_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	u_int8_t tim;
	u_int32_t idedma_ctl, udma_reg;
	int drive, s;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		/* add timing values, setup DMA if needed */
		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
		if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
			if (drvp->drive_flags & ATA_DRIVE_UDMA) {
				/* UltraDMA on a 646U2, 0648 or 0649 */
				s = splbio();
				drvp->drive_flags &= ~ATA_DRIVE_DMA;
				splx(s);
				udma_reg = pciide_pci_read(sc->sc_pc,
				    sc->sc_tag, CMD_UDMATIM(chp->ch_channel));
				if (drvp->UDMA_mode > 2 &&
				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
				    CMD_BICSR) &
				    CMD_BICSR_80(chp->ch_channel)) == 0)
					drvp->UDMA_mode = 2;
				if (drvp->UDMA_mode > 2)
					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
				else if (sc->sc_wdcdev.sc_atac.atac_udma_cap > 2)
					udma_reg |= CMD_UDMATIM_UDMA33(drive);
				udma_reg |= CMD_UDMATIM_UDMA(drive);
				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
				    CMD_UDMATIM_TIM_OFF(drive));
				udma_reg |=
				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
				    CMD_UDMATIM_TIM_OFF(drive));
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(chp->ch_channel), udma_reg);
			} else {
				/*
				 * Use multiword DMA.
				 * Timings will be used for both PIO and DMA,
				 * so adjust the DMA mode if needed.
				 * If we have a 0646U2/8/9, turn off UDMA.
				 */
				if (sc->sc_wdcdev.sc_atac.atac_cap & ATAC_CAP_UDMA) {
					udma_reg = pciide_pci_read(sc->sc_pc,
					    sc->sc_tag,
					    CMD_UDMATIM(chp->ch_channel));
					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
					pciide_pci_write(sc->sc_pc, sc->sc_tag,
					    CMD_UDMATIM(chp->ch_channel),
					    udma_reg);
				}
				if (drvp->PIO_mode >= 3 &&
				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
					drvp->DMA_mode = drvp->PIO_mode - 2;
				}
				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    CMD_DATA_TIM(chp->ch_channel, drive), tim);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}

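/*
 * Interrupt acknowledge for the 0646/0648/0649: reading the latched
 * CMD_CONF (primary) or CMD_ARTTIM23 (secondary) value and writing it
 * back is apparently what clears the channel's interrupt bit; then the
 * generic pciide_irqack() is called.
 */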
static void
cmd646_9_irqack(struct ata_channel *chp)
{
	u_int32_t priirq, secirq;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	if (chp->ch_channel == 0) {
		priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
	} else {
		secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
	}
	pciide_irqack(chp);
}

static void
cmd680_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = cmd680_setup_channel;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++)
		cmd680_channel_map(pa, sc, channel);
}

static void
cmd680_channel_map(const struct pci_attach_args *pa, struct pciide_softc *sc,
    int channel)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	int interface, i, reg;
	static const u_int8_t init_val[] =
	    { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
	      0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };

	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCIIDE_INTERFACE_SETTABLE(0) |
		    PCIIDE_INTERFACE_SETTABLE(1);
		interface |= PCIIDE_INTERFACE_PCI(0) |
		    PCIIDE_INTERFACE_PCI(1);
	} else {
		interface = PCI_INTERFACE(pa->pa_class);
	}

	sc->wdc_chanarray[channel] = &cp->ata_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->ata_channel.ch_channel = channel;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;

	cp->ata_channel.ch_queue =
	    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
	if (cp->ata_channel.ch_queue == NULL) {
		aprint_error("%s %s channel: "
		    "can't allocate memory for command queue",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cp->name);
		return;
	}

	/* XXX */
	reg = 0xa2 + channel * 16;
	for (i = 0; i < sizeof(init_val); i++)
		pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "%s channel %s to %s mode\n", cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");

	pciide_mapchan(pa, cp, interface, pciide_pci_intr);
}

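/*
 * Program PIO/DMA/UDMA timings for one SiI0680 channel.  Per-drive timing
 * words live in PCI config space at 0xa4 (PIO), 0xa8 (DMA) and 0xac (UDMA),
 * offset by 16 per channel and 2 per drive; the register at 0x80/0x84
 * selects which timing set is active for each drive.  UDMA modes above 2
 * are only used when the bit checked at 0xa0 (presumably 80-wire cable
 * detect) is set, and UDMA6 additionally requires the faster clocking
 * reported in the "scsc" register at 0x8a.
 */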
static void
cmd680_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	u_int8_t mode, off, scsc;
	u_int16_t val;
	u_int32_t idedma_ctl;
	int drive, s;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t pa = sc->sc_tag;
	static const u_int8_t udma2_tbl[] =
	    { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
	static const u_int8_t udma_tbl[] =
	    { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
	static const u_int16_t dma_tbl[] =
	    { 0x2208, 0x10c2, 0x10c1 };
	static const u_int16_t pio_tbl[] =
	    { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };

	idedma_ctl = 0;
	pciide_channel_dma_setup(cp);
	mode = pciide_pci_read(pc, pa, 0x80 + chp->ch_channel * 4);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		mode &= ~(0x03 << (drive * 4));
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
			off = 0xa0 + chp->ch_channel * 16;
			if (drvp->UDMA_mode > 2 &&
			    (pciide_pci_read(pc, pa, off) & 0x01) == 0)
				drvp->UDMA_mode = 2;
			scsc = pciide_pci_read(pc, pa, 0x8a);
			if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
				pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
				scsc = pciide_pci_read(pc, pa, 0x8a);
				if ((scsc & 0x30) == 0)
					drvp->UDMA_mode = 5;
			}
			mode |= 0x03 << (drive * 4);
			off = 0xac + chp->ch_channel * 16 + drive * 2;
			val = pciide_pci_read(pc, pa, off) & ~0x3f;
			if (scsc & 0x30)
				val |= udma2_tbl[drvp->UDMA_mode];
			else
				val |= udma_tbl[drvp->UDMA_mode];
			pciide_pci_write(pc, pa, off, val);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & ATA_DRIVE_DMA) {
			mode |= 0x02 << (drive * 4);
			off = 0xa8 + chp->ch_channel * 16 + drive * 2;
			val = dma_tbl[drvp->DMA_mode];
			pciide_pci_write(pc, pa, off, val & 0xff);
			pciide_pci_write(pc, pa, off+1, val >> 8);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			mode |= 0x01 << (drive * 4);
			off = 0xa4 + chp->ch_channel * 16 + drive * 2;
			val = pio_tbl[drvp->PIO_mode];
			pciide_pci_write(pc, pa, off, val & 0xff);
			pciide_pci_write(pc, pa, off+1, val >> 8);
		}
	}

	pciide_pci_write(pc, pa, 0x80 + chp->ch_channel * 4, mode);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}