/*	$NetBSD: wdc_obio.c,v 1.47 2007/10/17 19:55:20 garbled Exp $	*/

/*-
 * Copyright (c) 1998, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Onno van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: wdc_obio.c,v 1.47 2007/10/17 19:55:20 garbled Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/pio.h>

#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>

#include <dev/ofw/openfirm.h>

#include <macppc/dev/dbdma.h>

#define WDC_REG_NPORTS		8
#define WDC_AUXREG_OFFSET	0x16
#define WDC_DEFAULT_PIO_IRQ	13	/* XXX */
#define WDC_DEFAULT_DMA_IRQ	2	/* XXX */

#define WDC_OPTIONS_DMA 0x01

/*
 * XXX This code currently doesn't even try to allow 32-bit data port use.
 */

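/*
 * Per-attachment state.  Besides the common wdc/ata state, this holds the
 * DBDMA register mapping and command list used for DMA transfers and a
 * per-drive copy of the controller's CONFIG_REG timing value, which
 * wdc_obio_select() loads when the two drives need different timings.
 */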
struct wdc_obio_softc {
        struct wdc_softc sc_wdcdev;
        struct ata_channel *sc_chanptr;
        struct ata_channel sc_channel;
        struct ata_queue sc_chqueue;
        struct wdc_regs sc_wdc_regs;
        bus_space_handle_t sc_dmaregh;
        dbdma_regmap_t *sc_dmareg;
        dbdma_command_t *sc_dmacmd;
        u_int sc_dmaconf[2];	/* per target value of CONFIG_REG */
        void *sc_ih;
};

int wdc_obio_probe __P((struct device *, struct cfdata *, void *));
void wdc_obio_attach __P((struct device *, struct device *, void *));
int wdc_obio_detach __P((struct device *, int));
int wdc_obio_dma_init __P((void *, int, int, void *, size_t, int));
void wdc_obio_dma_start __P((void *, int, int));
int wdc_obio_dma_finish __P((void *, int, int, int));

static void wdc_obio_select __P((struct ata_channel *, int));
static void adjust_timing __P((struct ata_channel *));
static void ata4_adjust_timing __P((struct ata_channel *));

CFATTACH_DECL(wdc_obio, sizeof(struct wdc_obio_softc),
    wdc_obio_probe, wdc_obio_attach, wdc_obio_detach, wdcactivate);

static const char *ata_names[] = {
        "heathrow-ata",
        "keylargo-ata",
        "ohare-ata",
        NULL
};

int
wdc_obio_probe(parent, match, aux)
        struct device *parent;
        struct cfdata *match;
        void *aux;
{
        struct confargs *ca = aux;

        /* XXX should not use name */
        if (strcmp(ca->ca_name, "ATA") == 0 ||
            strcmp(ca->ca_name, "ata") == 0 ||
            strcmp(ca->ca_name, "ata0") == 0 ||
            strcmp(ca->ca_name, "ide") == 0)
                return 1;

        if (of_compatible(ca->ca_node, ata_names) >= 0)
                return 1;

        return 0;
}

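/*
 * Map the ATA command/control registers described by the OF node and hook
 * up the interrupt.  When WDC_OPTIONS_DMA is set in cf_flags and the node
 * looks DMA-capable, also map the DBDMA register block and allocate a
 * command list, then hand the channel to the MI wdc code via wdcattach().
 */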
void
wdc_obio_attach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
        struct wdc_obio_softc *sc = (void *)self;
        struct wdc_regs *wdr;
        struct confargs *ca = aux;
        struct ata_channel *chp = &sc->sc_channel;
        int intr, i, type = IST_EDGE;
        int use_dma = 0;
        char path[80];

        if (device_cfdata(&sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
            WDC_OPTIONS_DMA) {
                if (ca->ca_nreg >= 16 || ca->ca_nintr == -1)
                        use_dma = 1;	/* XXX Doesn't work yet. */
        }

        if (ca->ca_nintr >= 4 && ca->ca_nreg >= 8) {
                intr = ca->ca_intr[0];
                printf(" irq %d", intr);
                if (ca->ca_nintr > 8) {
                        type = ca->ca_intr[1] ? IST_LEVEL : IST_EDGE;
                }
                printf(", %s triggered", (type == IST_EDGE) ? "edge" : "level");
        } else if (ca->ca_nintr == -1) {
                intr = WDC_DEFAULT_PIO_IRQ;
                printf(" irq property not found; using %d", intr);
        } else {
                printf(": couldn't get irq property\n");
                return;
        }

        if (use_dma)
                printf(": DMA transfer");

        printf("\n");

        sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;

        wdr->cmd_iot = wdr->ctl_iot = ca->ca_tag;

        if (bus_space_map(wdr->cmd_iot, ca->ca_baseaddr + ca->ca_reg[0],
            WDC_REG_NPORTS << 4, 0, &wdr->cmd_baseioh) ||
            bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
            WDC_AUXREG_OFFSET << 4, 1, &wdr->ctl_ioh)) {
                printf("%s: couldn't map registers\n",
                    sc->sc_wdcdev.sc_atac.atac_dev.dv_xname);
                return;
        }

        for (i = 0; i < WDC_NREG; i++) {
                if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i << 4,
                    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
                        bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
                            WDC_REG_NPORTS << 4);
                        printf("%s: couldn't subregion registers\n",
                            sc->sc_wdcdev.sc_atac.atac_dev.dv_xname);
                        return;
                }
        }
#if 0
        wdr->data32iot = wdr->cmd_iot;
        wdr->data32ioh = wdr->cmd_ioh;
#endif

        sc->sc_ih = intr_establish(intr, type, IPL_BIO, wdcintr, chp);

        if (use_dma) {
                sc->sc_dmacmd = dbdma_alloc(sizeof(dbdma_command_t) * 20);
                /*
                 * XXX
                 * We don't use ca->ca_reg[3] for the size here because at
                 * least on the PB3400c it reports 0x200 for both IDE
                 * channels (the one on the mainboard and the one in the
                 * mediabay) even though their start addresses are only
                 * 0x100 apart.  Since the DMA registers always fit within
                 * 0x100 bytes this doesn't really matter.
                 */
                if (bus_space_map(wdr->cmd_iot, ca->ca_baseaddr + ca->ca_reg[2],
                    0x100, BUS_SPACE_MAP_LINEAR, &sc->sc_dmaregh)) {

                        aprint_error("%s: unable to map DMA registers (%08x)\n",
                            sc->sc_wdcdev.sc_atac.atac_dev.dv_xname,
                            ca->ca_reg[2]);
                        /* should unmap stuff here */
                        return;
                }
                sc->sc_dmareg = bus_space_vaddr(wdr->cmd_iot, sc->sc_dmaregh);

                sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
                sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
                if (strcmp(ca->ca_name, "ata-4") == 0) {
                        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
                        sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
                        sc->sc_wdcdev.sc_atac.atac_set_modes =
                            ata4_adjust_timing;
                } else {
                        sc->sc_wdcdev.sc_atac.atac_set_modes = adjust_timing;
                }
#ifdef notyet
                /* Minimum cycle time is 150ns (DMA MODE 1) on ohare. */
                if (ohare) {
                        sc->sc_wdcdev.sc_atac.atac_pio_cap = 3;
                        sc->sc_wdcdev.sc_atac.atac_dma_cap = 1;
                }
#endif
        } else {
                /* all non-DMA controllers can use adjust_timing */
                sc->sc_wdcdev.sc_atac.atac_set_modes = adjust_timing;
        }

        sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
        sc->sc_chanptr = chp;
        sc->sc_wdcdev.sc_atac.atac_channels = &sc->sc_chanptr;
        sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
        sc->sc_wdcdev.dma_arg = sc;
        sc->sc_wdcdev.dma_init = wdc_obio_dma_init;
        sc->sc_wdcdev.dma_start = wdc_obio_dma_start;
        sc->sc_wdcdev.dma_finish = wdc_obio_dma_finish;
        chp->ch_channel = 0;
        chp->ch_atac = &sc->sc_wdcdev.sc_atac;
        chp->ch_queue = &sc->sc_chqueue;
        chp->ch_ndrive = 2;

        wdc_init_shadow_regs(chp);

#define OHARE_FEATURE_REG	0xf3000038

        /* XXX Enable wdc1 by feature reg. */
        memset(path, 0, sizeof(path));
        OF_package_to_path(ca->ca_node, path, sizeof(path));
        if (strcmp(path, "/bandit@F2000000/ohare@10/ata@21000") == 0) {
                u_int x;

                x = in32rb(OHARE_FEATURE_REG);
                x |= 8;
                out32rb(OHARE_FEATURE_REG, x);
        }

        wdcattach(chp);
}

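/*
 * Transfer timing parameters and their conversion into the controller's
 * CONFIG_REG value.  adjust_timing() (non-"ata-4" cells, 30 ns ticks) and
 * ata4_adjust_timing() ("ata-4" cell, 15 ns ticks) turn the minimum cycle
 * and active times below into tick counts and store the packed result in
 * sc_dmaconf[] for wdc_obio_select().
 */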
/* PIO, multiword DMA and Ultra-DMA transfer timings */
struct ide_timings {
        int cycle;	/* minimum cycle time [ns] */
        int active;	/* minimum command active time [ns] */
};
static struct ide_timings pio_timing[5] = {
        { 600, 180 },	/* Mode 0 */
        { 390, 150 },	/*      1 */
        { 240, 105 },	/*      2 */
        { 180,  90 },	/*      3 */
        { 120,  75 }	/*      4 */
};
static struct ide_timings dma_timing[3] = {
        { 480, 240 },	/* Mode 0 */
        { 165,  90 },	/* Mode 1 */
        { 120,  75 }	/* Mode 2 */
};

static struct ide_timings udma_timing[5] = {
        { 120, 180 },	/* Mode 0 */
        {  90, 150 },	/* Mode 1 */
        {  60, 120 },	/* Mode 2 */
        {  45,  90 },	/* Mode 3 */
        {  30,  90 }	/* Mode 4 */
};

#define TIME_TO_TICK(time)	howmany((time), 30)
#define PIO_REC_OFFSET	4
#define PIO_REC_MIN	1
#define PIO_ACT_MIN	1
#define DMA_REC_OFFSET	1
#define DMA_REC_MIN	1
#define DMA_ACT_MIN	1

#define ATA4_TIME_TO_TICK(time)	howmany((time), 15)	/* 15 ns clock */

#define CONFIG_REG	(0x200)		/* IDE access timing register */

void
wdc_obio_select(chp, drive)
        struct ata_channel *chp;
        int drive;
{
        struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->ch_atac;
        struct wdc_regs *wdr = CHAN_TO_WDC_REGS(chp);

        bus_space_write_4(wdr->cmd_iot, wdr->cmd_baseioh,
            CONFIG_REG, sc->sc_dmaconf[drive]);
}

void
adjust_timing(chp)
        struct ata_channel *chp;
{
        struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->ch_atac;
        int drive;
        int min_cycle = 0, min_active = 0;
        int cycle_tick = 0, act_tick = 0, inact_tick = 0, half_tick;

        for (drive = 0; drive < 2; drive++) {
                u_int conf = 0;
                struct ata_drive_datas *drvp;

                drvp = &chp->ch_drive[drive];
                /* set up pio mode timings */
                if (drvp->drive_flags & DRIVE) {
                        int piomode = drvp->PIO_mode;
                        min_cycle = pio_timing[piomode].cycle;
                        min_active = pio_timing[piomode].active;

                        cycle_tick = TIME_TO_TICK(min_cycle);
                        act_tick = TIME_TO_TICK(min_active);
                        if (act_tick < PIO_ACT_MIN)
                                act_tick = PIO_ACT_MIN;
                        inact_tick = cycle_tick - act_tick - PIO_REC_OFFSET;
                        if (inact_tick < PIO_REC_MIN)
                                inact_tick = PIO_REC_MIN;
                        /* mask: 0x000007ff */
                        conf |= (inact_tick << 5) | act_tick;
                }
                /* Set up DMA mode timings */
                if (drvp->drive_flags & DRIVE_DMA) {
                        int dmamode = drvp->DMA_mode;
                        min_cycle = dma_timing[dmamode].cycle;
                        min_active = dma_timing[dmamode].active;
                        cycle_tick = TIME_TO_TICK(min_cycle);
                        act_tick = TIME_TO_TICK(min_active);
                        inact_tick = cycle_tick - act_tick - DMA_REC_OFFSET;
                        if (inact_tick < DMA_REC_MIN)
                                inact_tick = DMA_REC_MIN;
                        half_tick = 0;	/* XXX */
                        /* mask: 0xfffff800 */
                        conf |=
                            (half_tick << 21) |
                            (inact_tick << 16) | (act_tick << 11);
                }
#ifdef DEBUG
                if (conf) {
                        printf("conf[%d] = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
                            drive, conf, cycle_tick, min_cycle, act_tick, min_active, inact_tick);
                }
#endif
                sc->sc_dmaconf[drive] = conf;
        }
        sc->sc_wdcdev.select = 0;
        if (sc->sc_dmaconf[0]) {
                wdc_obio_select(chp, 0);
                if (sc->sc_dmaconf[1] &&
                    (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
                        sc->sc_wdcdev.select = wdc_obio_select;
                }
        } else if (sc->sc_dmaconf[1]) {
                wdc_obio_select(chp, 1);
        }
}

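/*
 * Same conversion as adjust_timing(), but for the UDMA-capable "ata-4"
 * cell: the timing register uses 15 ns ticks, the PIO and DMA fields sit
 * at different bit positions, and an Ultra-DMA field is added.
 */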
void
ata4_adjust_timing(chp)
        struct ata_channel *chp;
{
        struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->ch_atac;
        int drive;
        int min_cycle = 0, min_active = 0;
        int cycle_tick = 0, act_tick = 0, inact_tick = 0;

        for (drive = 0; drive < 2; drive++) {
                u_int conf = 0;
                struct ata_drive_datas *drvp;

                drvp = &chp->ch_drive[drive];
                /* set up pio mode timings */

                if (drvp->drive_flags & DRIVE) {
                        int piomode = drvp->PIO_mode;
                        min_cycle = pio_timing[piomode].cycle;
                        min_active = pio_timing[piomode].active;

                        cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
                        act_tick = ATA4_TIME_TO_TICK(min_active);
                        inact_tick = cycle_tick - act_tick;
                        /* mask: 0x000003ff */
                        conf |= (inact_tick << 5) | act_tick;
                }
                /* set up dma mode timings */
                if (drvp->drive_flags & DRIVE_DMA) {
                        int dmamode = drvp->DMA_mode;
                        min_cycle = dma_timing[dmamode].cycle;
                        min_active = dma_timing[dmamode].active;
                        cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
                        act_tick = ATA4_TIME_TO_TICK(min_active);
                        inact_tick = cycle_tick - act_tick;
                        /* mask: 0x001ffc00 */
                        conf |= (act_tick << 10) | (inact_tick << 15);
                }
                /* set up udma mode timings */
                if (drvp->drive_flags & DRIVE_UDMA) {
                        int udmamode = drvp->UDMA_mode;
                        min_cycle = udma_timing[udmamode].cycle;
                        min_active = udma_timing[udmamode].active;
                        act_tick = ATA4_TIME_TO_TICK(min_active);
                        cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
                        /* mask: 0x1ff00000 */
                        conf |= (cycle_tick << 21) | (act_tick << 25) | 0x100000;
                }
#ifdef DEBUG
                if (conf) {
                        printf("ata4 conf[%d] = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
                            drive, conf, cycle_tick, min_cycle, act_tick, min_active, inact_tick);
                }
#endif
                sc->sc_dmaconf[drive] = conf;
        }
        sc->sc_wdcdev.select = 0;
        if (sc->sc_dmaconf[0]) {
                wdc_obio_select(chp, 0);
                if (sc->sc_dmaconf[1] &&
                    (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
                        sc->sc_wdcdev.select = wdc_obio_select;
                }
        } else if (sc->sc_dmaconf[1]) {
                wdc_obio_select(chp, 1);
        }
}

int
wdc_obio_detach(self, flags)
        struct device *self;
        int flags;
{
        struct wdc_obio_softc *sc = (void *)self;
        int error;

        if ((error = wdcdetach(self, flags)) != 0)
                return error;

        intr_disestablish(sc->sc_ih);

        /* Unmap our i/o space. */
        bus_space_unmap(sc->sc_wdcdev.regs->cmd_iot,
            sc->sc_wdcdev.regs->cmd_baseioh, WDC_REG_NPORTS << 4);

        /* Unmap DMA registers. */
        /* XXX unmapiodev(sc->sc_dmareg); */
        /* XXX free(sc->sc_dmacmd); */

        return 0;
}

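/*
 * Build the DBDMA command list for one transfer: an optional leading
 * partial page, then whole pages, then a final IN_LAST/OUT_LAST descriptor
 * for the remainder, terminated by a DBDMA_CMD_STOP.  Physical addresses
 * are taken from vtophys(), one descriptor per page.
 */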
int
wdc_obio_dma_init(v, channel, drive, databuf, datalen, flags)
        void *v;
        int channel, drive;
        void *databuf;
        size_t datalen;
        int flags;
{
        struct wdc_obio_softc *sc = v;
        vaddr_t va = (vaddr_t)databuf;
        dbdma_command_t *cmdp;
        u_int cmd, offset;
        int read = flags & WDC_DMA_READ;

        cmdp = sc->sc_dmacmd;
        cmd = read ? DBDMA_CMD_IN_MORE : DBDMA_CMD_OUT_MORE;

        offset = va & PGOFSET;

        /* if va is not page-aligned, setup the first page */
        if (offset != 0) {
                int rest = PAGE_SIZE - offset;	/* the rest of the page */

                if (datalen > rest) {		/* if continues to next page */
                        DBDMA_BUILD(cmdp, cmd, 0, rest, vtophys(va),
                            DBDMA_INT_NEVER, DBDMA_WAIT_NEVER,
                            DBDMA_BRANCH_NEVER);
                        datalen -= rest;
                        va += rest;
                        cmdp++;
                }
        }

        /* now va is page-aligned */
        while (datalen > PAGE_SIZE) {
                DBDMA_BUILD(cmdp, cmd, 0, PAGE_SIZE, vtophys(va),
                    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
                datalen -= PAGE_SIZE;
                va += PAGE_SIZE;
                cmdp++;
        }

        /* the last page (datalen <= PAGE_SIZE here) */
        cmd = read ? DBDMA_CMD_IN_LAST : DBDMA_CMD_OUT_LAST;
        DBDMA_BUILD(cmdp, cmd, 0, datalen, vtophys(va),
            DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
        cmdp++;

        DBDMA_BUILD(cmdp, DBDMA_CMD_STOP, 0, 0, 0,
            DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

        return 0;
}

void
wdc_obio_dma_start(v, channel, drive)
        void *v;
        int channel, drive;
{
        struct wdc_obio_softc *sc = v;

        dbdma_start(sc->sc_dmareg, sc->sc_dmacmd);
}

int
wdc_obio_dma_finish(v, channel, drive, read)
        void *v;
        int channel, drive;
        int read;
{
        struct wdc_obio_softc *sc = v;

        dbdma_stop(sc->sc_dmareg);
        return 0;
}