/*	$NetBSD: wdc_obio.c,v 1.42 2005/12/11 12:18:03 christos Exp $	*/

/*-
 * Copyright (c) 1998, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Onno van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: wdc_obio.c,v 1.42 2005/12/11 12:18:03 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/autoconf.h>

#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>

#include <dev/ofw/openfirm.h>

#include <macppc/dev/dbdma.h>

#define	WDC_REG_NPORTS		8
#define	WDC_AUXREG_OFFSET	0x16
#define	WDC_DEFAULT_PIO_IRQ	13	/* XXX */
#define	WDC_DEFAULT_DMA_IRQ	2	/* XXX */

#define	WDC_OPTIONS_DMA		0x01

/*
 * XXX This code currently doesn't even try to allow 32-bit data port use.
 */
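/*
 * Driver overview (inferred from the code below): this is the macppc obio
 * front end for the MI wdc(4) driver, handling the on-board IDE cell found
 * in the ohare/heathrow/keylargo I/O chips.  The device is matched by its
 * OpenFirmware node name or its "compatible" property; PIO goes through
 * bus_space, and, when enabled, DMA goes through the cell's DBDMA engine
 * using the command list built in wdc_obio_dma_init().
 */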
struct wdc_obio_softc {
	struct wdc_softc sc_wdcdev;
	struct ata_channel *sc_chanptr;
	struct ata_channel sc_channel;
	struct ata_queue sc_chqueue;
	struct wdc_regs sc_wdc_regs;
	dbdma_regmap_t *sc_dmareg;
	dbdma_command_t *sc_dmacmd;
	u_int sc_dmaconf[2];	/* per target value of CONFIG_REG */
	void *sc_ih;
};

int wdc_obio_probe __P((struct device *, struct cfdata *, void *));
void wdc_obio_attach __P((struct device *, struct device *, void *));
int wdc_obio_detach __P((struct device *, int));
int wdc_obio_dma_init __P((void *, int, int, void *, size_t, int));
void wdc_obio_dma_start __P((void *, int, int));
int wdc_obio_dma_finish __P((void *, int, int, int));

static void wdc_obio_select __P((struct ata_channel *, int));
static void adjust_timing __P((struct ata_channel *));
static void ata4_adjust_timing __P((struct ata_channel *));

CFATTACH_DECL(wdc_obio, sizeof(struct wdc_obio_softc),
    wdc_obio_probe, wdc_obio_attach, wdc_obio_detach, wdcactivate);

int
wdc_obio_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct confargs *ca = aux;
	char compat[32];

	/* XXX should not use name */
	if (strcmp(ca->ca_name, "ATA") == 0 ||
	    strcmp(ca->ca_name, "ata") == 0 ||
	    strcmp(ca->ca_name, "ata0") == 0 ||
	    strcmp(ca->ca_name, "ide") == 0)
		return 1;

	memset(compat, 0, sizeof(compat));
	OF_getprop(ca->ca_node, "compatible", compat, sizeof(compat));
	if (strcmp(compat, "heathrow-ata") == 0 ||
	    strcmp(compat, "keylargo-ata") == 0)
		return 1;

	return 0;
}
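/*
 * Attach the controller: map the command block (with the alternate status
 * register as a one-byte subregion at WDC_AUXREG_OFFSET), establish the
 * interrupt, optionally set up the DBDMA engine and the per-chip timing
 * hook, then hand the single channel to the MI wdcattach().
 */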
void
wdc_obio_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct wdc_obio_softc *sc = (void *)self;
	struct wdc_regs *wdr;
	struct confargs *ca = aux;
	struct ata_channel *chp = &sc->sc_channel;
	int intr, i;
	int use_dma = 0;
	char path[80];

	if (sc->sc_wdcdev.sc_atac.atac_dev.dv_cfdata->cf_flags & WDC_OPTIONS_DMA) {
		if (ca->ca_nreg >= 16 || ca->ca_nintr == -1)
			use_dma = 1;	/* XXX Doesn't work yet. */
	}

	if (ca->ca_nintr >= 4 && ca->ca_nreg >= 8) {
		intr = ca->ca_intr[0];
		printf(" irq %d", intr);
	} else if (ca->ca_nintr == -1) {
		intr = WDC_DEFAULT_PIO_IRQ;
		printf(" irq property not found; using %d", intr);
	} else {
		printf(": couldn't get irq property\n");
		return;
	}

	if (use_dma)
		printf(": DMA transfer");

	printf("\n");

	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;

	wdr->cmd_iot = wdr->ctl_iot =
	    macppc_make_bus_space_tag(ca->ca_baseaddr + ca->ca_reg[0], 4);

	if (bus_space_map(wdr->cmd_iot, 0, WDC_REG_NPORTS, 0,
	    &wdr->cmd_baseioh) ||
	    bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
	    WDC_AUXREG_OFFSET, 1, &wdr->ctl_ioh)) {
		printf("%s: couldn't map registers\n",
		    sc->sc_wdcdev.sc_atac.atac_dev.dv_xname);
		return;
	}

	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
			    WDC_REG_NPORTS);
			printf("%s: couldn't subregion registers\n",
			    sc->sc_wdcdev.sc_atac.atac_dev.dv_xname);
			return;
		}
	}
#if 0
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_ioh;
#endif

	sc->sc_ih = intr_establish(intr, IST_LEVEL, IPL_BIO, wdcintr, chp);

	if (use_dma) {
		sc->sc_dmacmd = dbdma_alloc(sizeof(dbdma_command_t) * 20);
		sc->sc_dmareg = mapiodev(ca->ca_baseaddr + ca->ca_reg[2],
		    ca->ca_reg[3]);
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		if (strcmp(ca->ca_name, "ata-4") == 0) {
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			sc->sc_wdcdev.sc_atac.atac_set_modes = ata4_adjust_timing;
		} else {
			sc->sc_wdcdev.sc_atac.atac_set_modes = adjust_timing;
		}
#ifdef notyet
		/* Minimum cycle time is 150ns (DMA MODE 1) on ohare. */
		if (ohare) {
			sc->sc_wdcdev.sc_atac.atac_pio_cap = 3;
			sc->sc_wdcdev.sc_atac.atac_dma_cap = 1;
		}
#endif
	} else {
		/* all non-DMA controllers can use adjust_timing */
		sc->sc_wdcdev.sc_atac.atac_set_modes = adjust_timing;
	}

	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
	sc->sc_chanptr = chp;
	sc->sc_wdcdev.sc_atac.atac_channels = &sc->sc_chanptr;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = wdc_obio_dma_init;
	sc->sc_wdcdev.dma_start = wdc_obio_dma_start;
	sc->sc_wdcdev.dma_finish = wdc_obio_dma_finish;
	chp->ch_channel = 0;
	chp->ch_atac = &sc->sc_wdcdev.sc_atac;
	chp->ch_queue = &sc->sc_chqueue;

	wdc_init_shadow_regs(chp);

#define	OHARE_FEATURE_REG	0xf3000038

	/* XXX Enable wdc1 by feature reg. */
	memset(path, 0, sizeof(path));
	OF_package_to_path(ca->ca_node, path, sizeof(path));
	if (strcmp(path, "/bandit@F2000000/ohare@10/ata@21000") == 0) {
		u_int x;

		x = in32rb(OHARE_FEATURE_REG);
		x |= 8;
		out32rb(OHARE_FEATURE_REG, x);
	}

	wdcattach(chp);
}
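/*
 * Per-channel transfer timing.  The IDE cell has a single timing register
 * (CONFIG_REG below); adjust_timing() and ata4_adjust_timing() translate the
 * PIO/DMA/UDMA modes negotiated for each drive into a value for that
 * register, cached in sc_dmaconf[].  When the two drives need different
 * values, the wdc_obio_select() hook is installed so the register is
 * reloaded whenever the other drive is selected.
 */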
/* PIO, multiword DMA and Ultra DMA transfer timings */
struct ide_timings {
	int cycle;	/* minimum cycle time [ns] */
	int active;	/* minimum command active time [ns] */
};

static struct ide_timings pio_timing[5] = {
	{ 600, 180 },	/* Mode 0 */
	{ 390, 150 },	/*      1 */
	{ 240, 105 },	/*      2 */
	{ 180,  90 },	/*      3 */
	{ 120,  75 }	/*      4 */
};

static struct ide_timings dma_timing[3] = {
	{ 480, 240 },	/* Mode 0 */
	{ 165,  90 },	/* Mode 1 */
	{ 120,  75 }	/* Mode 2 */
};

static struct ide_timings udma_timing[5] = {
	{ 120, 180 },	/* Mode 0 */
	{  90, 150 },	/* Mode 1 */
	{  60, 120 },	/* Mode 2 */
	{  45,  90 },	/* Mode 3 */
	{  30,  90 }	/* Mode 4 */
};

#define	TIME_TO_TICK(time)	howmany((time), 30)
#define	PIO_REC_OFFSET	4
#define	PIO_REC_MIN	1
#define	PIO_ACT_MIN	1
#define	DMA_REC_OFFSET	1
#define	DMA_REC_MIN	1
#define	DMA_ACT_MIN	1

#define	ATA4_TIME_TO_TICK(time)	howmany((time), 15)	/* 15 ns clock */

#define	CONFIG_REG	(0x200 >> 4)	/* IDE access timing register */

void
wdc_obio_select(chp, drive)
	struct ata_channel *chp;
	int drive;
{
	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->ch_atac;
	struct wdc_regs *wdr = CHAN_TO_WDC_REGS(chp);

	bus_space_write_4(wdr->cmd_iot, wdr->cmd_baseioh,
	    CONFIG_REG, sc->sc_dmaconf[drive]);
}

void
adjust_timing(chp)
	struct ata_channel *chp;
{
	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->ch_atac;
	int drive;
	int min_cycle = 0, min_active = 0;
	int cycle_tick = 0, act_tick = 0, inact_tick = 0, half_tick;

	for (drive = 0; drive < 2; drive++) {
		u_int conf = 0;
		struct ata_drive_datas *drvp;

		drvp = &chp->ch_drive[drive];
		/* set up pio mode timings */
		if (drvp->drive_flags & DRIVE) {
			int piomode = drvp->PIO_mode;
			min_cycle = pio_timing[piomode].cycle;
			min_active = pio_timing[piomode].active;

			cycle_tick = TIME_TO_TICK(min_cycle);
			act_tick = TIME_TO_TICK(min_active);
			if (act_tick < PIO_ACT_MIN)
				act_tick = PIO_ACT_MIN;
			inact_tick = cycle_tick - act_tick - PIO_REC_OFFSET;
			if (inact_tick < PIO_REC_MIN)
				inact_tick = PIO_REC_MIN;
			/* mask: 0x000007ff */
			conf |= (inact_tick << 5) | act_tick;
		}
		/* Set up DMA mode timings */
		if (drvp->drive_flags & DRIVE_DMA) {
			int dmamode = drvp->DMA_mode;
			min_cycle = dma_timing[dmamode].cycle;
			min_active = dma_timing[dmamode].active;
			cycle_tick = TIME_TO_TICK(min_cycle);
			act_tick = TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick - DMA_REC_OFFSET;
			if (inact_tick < DMA_REC_MIN)
				inact_tick = DMA_REC_MIN;
			half_tick = 0;	/* XXX */
			/* mask: 0xfffff800 */
			conf |=
			    (half_tick << 21) |
			    (inact_tick << 16) | (act_tick << 11);
		}
#ifdef DEBUG
		if (conf) {
			printf("conf[%d] = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
			    drive, conf, cycle_tick, min_cycle, act_tick, min_active, inact_tick);
		}
#endif
		sc->sc_dmaconf[drive] = conf;
	}
	sc->sc_wdcdev.select = 0;
	if (sc->sc_dmaconf[0]) {
		wdc_obio_select(chp, 0);
		if (sc->sc_dmaconf[1] &&
		    (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
			sc->sc_wdcdev.select = wdc_obio_select;
		}
	} else if (sc->sc_dmaconf[1]) {
		wdc_obio_select(chp, 1);
	}
}
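/*
 * Worked example for the non-ata-4 cell (TIME_TO_TICK implies a 30 ns
 * timing clock): a drive negotiated to PIO mode 4 has min_cycle = 120 ns
 * and min_active = 75 ns, so cycle_tick = howmany(120, 30) = 4 and
 * act_tick = howmany(75, 30) = 3.  inact_tick = 4 - 3 - PIO_REC_OFFSET(4)
 * = -3, which is clamped up to PIO_REC_MIN(1), giving
 * conf = (1 << 5) | 3 = 0x23 in the low 11 bits of CONFIG_REG.
 */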
void
ata4_adjust_timing(chp)
	struct ata_channel *chp;
{
	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->ch_atac;
	int drive;
	int min_cycle = 0, min_active = 0;
	int cycle_tick = 0, act_tick = 0, inact_tick = 0;

	for (drive = 0; drive < 2; drive++) {
		u_int conf = 0;
		struct ata_drive_datas *drvp;

		drvp = &chp->ch_drive[drive];
		/* set up pio mode timings */

		if (drvp->drive_flags & DRIVE) {
			int piomode = drvp->PIO_mode;
			min_cycle = pio_timing[piomode].cycle;
			min_active = pio_timing[piomode].active;

			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			act_tick = ATA4_TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick;
			/* mask: 0x000003ff */
			conf |= (inact_tick << 5) | act_tick;
		}
		/* set up dma mode timings */
		if (drvp->drive_flags & DRIVE_DMA) {
			int dmamode = drvp->DMA_mode;
			min_cycle = dma_timing[dmamode].cycle;
			min_active = dma_timing[dmamode].active;
			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			act_tick = ATA4_TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick;
			/* mask: 0x001ffc00 */
			conf |= (act_tick << 10) | (inact_tick << 15);
		}
		/* set up udma mode timings */
		if (drvp->drive_flags & DRIVE_UDMA) {
			int udmamode = drvp->UDMA_mode;
			min_cycle = udma_timing[udmamode].cycle;
			min_active = udma_timing[udmamode].active;
			act_tick = ATA4_TIME_TO_TICK(min_active);
			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			/* mask: 0x1ff00000 */
			conf |= (cycle_tick << 21) | (act_tick << 25) | 0x100000;
		}
#ifdef DEBUG
		if (conf) {
			printf("ata4 conf[%d] = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
			    drive, conf, cycle_tick, min_cycle, act_tick, min_active, inact_tick);
		}
#endif
		sc->sc_dmaconf[drive] = conf;
	}
	sc->sc_wdcdev.select = 0;
	if (sc->sc_dmaconf[0]) {
		wdc_obio_select(chp, 0);
		if (sc->sc_dmaconf[1] &&
		    (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
			sc->sc_wdcdev.select = wdc_obio_select;
		}
	} else if (sc->sc_dmaconf[1]) {
		wdc_obio_select(chp, 1);
	}
}

int
wdc_obio_detach(self, flags)
	struct device *self;
	int flags;
{
	struct wdc_obio_softc *sc = (void *)self;
	int error;

	if ((error = wdcdetach(self, flags)) != 0)
		return error;

	intr_disestablish(sc->sc_ih);

	/* Unmap our i/o space (the handle mapped in wdc_obio_attach()). */
	bus_space_unmap(sc->sc_wdcdev.regs->cmd_iot,
	    sc->sc_wdcdev.regs->cmd_baseioh, WDC_REG_NPORTS);

	/* Unmap DMA registers. */
	/* XXX unmapiodev(sc->sc_dmareg); */
	/* XXX free(sc->sc_dmacmd); */

	return 0;
}
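/*
 * Build the DBDMA command list for one transfer: an optional leading
 * descriptor for the unaligned part of the first page, one
 * DBDMA_CMD_IN_MORE/OUT_MORE descriptor per full page, a
 * DBDMA_CMD_IN_LAST/OUT_LAST descriptor for the tail, and a DBDMA_CMD_STOP.
 * The list allocated in wdc_obio_attach() holds 20 commands, which (assuming
 * 4 KB pages and transfers bounded by a 64 KB MAXPHYS) covers the worst case
 * of 17 data descriptors plus the stop.
 */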
int
wdc_obio_dma_init(v, channel, drive, databuf, datalen, flags)
	void *v;
	int channel, drive;
	void *databuf;
	size_t datalen;
	int flags;
{
	struct wdc_obio_softc *sc = v;
	vaddr_t va = (vaddr_t)databuf;
	dbdma_command_t *cmdp;
	u_int cmd, offset;
	int read = flags & WDC_DMA_READ;

	cmdp = sc->sc_dmacmd;
	cmd = read ? DBDMA_CMD_IN_MORE : DBDMA_CMD_OUT_MORE;

	offset = va & PGOFSET;

	/* if va is not page-aligned, set up the first page */
	if (offset != 0) {
		int rest = PAGE_SIZE - offset;	/* the rest of the page */

		if (datalen > rest) {	/* if it continues to the next page */
			DBDMA_BUILD(cmdp, cmd, 0, rest, vtophys(va),
			    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER,
			    DBDMA_BRANCH_NEVER);
			datalen -= rest;
			va += rest;
			cmdp++;
		}
	}

	/* now va is page-aligned */
	while (datalen > PAGE_SIZE) {
		DBDMA_BUILD(cmdp, cmd, 0, PAGE_SIZE, vtophys(va),
		    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
		datalen -= PAGE_SIZE;
		va += PAGE_SIZE;
		cmdp++;
	}

	/* the last page (datalen <= PAGE_SIZE here) */
	cmd = read ? DBDMA_CMD_IN_LAST : DBDMA_CMD_OUT_LAST;
	DBDMA_BUILD(cmdp, cmd, 0, datalen, vtophys(va),
	    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
	cmdp++;

	DBDMA_BUILD(cmdp, DBDMA_CMD_STOP, 0, 0, 0,
	    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

	return 0;
}

void
wdc_obio_dma_start(v, channel, drive)
	void *v;
	int channel, drive;
{
	struct wdc_obio_softc *sc = v;

	dbdma_start(sc->sc_dmareg, sc->sc_dmacmd);
}

int
wdc_obio_dma_finish(v, channel, drive, read)
	void *v;
	int channel, drive;
	int read;
{
	struct wdc_obio_softc *sc = v;

	dbdma_stop(sc->sc_dmareg);
	return 0;
}