/*	$NetBSD: dma.c,v 1.19 2007/03/04 13:59:47 tsutsui Exp $	*/

/*
 * Copyright (c) 1994 Paul Kranenburg. All rights reserved.
 * Copyright (c) 1994 Peter Galbavy. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Peter Galbavy.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.19 2007/03/04 13:59:47 tsutsui Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <machine/autoconf.h>
#include <machine/dvma.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/ncr53c9xreg.h>
#include <dev/ic/ncr53c9xvar.h>

#include <sun3/dev/dmareg.h>
#include <sun3/dev/dmavar.h>

#define MAX_DMA_SZ	0x01000000	/* 16MB */

static int	dmamatch(struct device *, struct cfdata *, void *);
static void	dmaattach(struct device *, struct device *, void *);

CFATTACH_DECL(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

extern struct cfdriver dma_cd;

static int
dmamatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct confargs *ca = aux;

	/*
	 * Check for the DMA registers.
	 */
	if (bus_peek(ca->ca_bustype, ca->ca_paddr, 4) == -1)
		return (0);

	/* If default ipl, fill it in. */
	if (ca->ca_intpri == -1)
		ca->ca_intpri = 2;

	return (1);
}

static void
dmaattach(struct device *parent, struct device *self, void *aux)
{
	struct confargs *ca = aux;
	struct dma_softc *sc = (void *)self;
	int id;

#if 0
	/* indirect functions */
	sc->intr = espdmaintr;
	sc->setup = dma_setup;
	sc->reset = dma_reset;
#endif

	/*
	 * Map in the registers.
	 */
	sc->sc_bst = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;
	if (bus_space_map(sc->sc_bst, ca->ca_paddr, DMAREG_SIZE,
	    0, &sc->sc_bsh) != 0) {
		printf(": can't map register\n");
		return;
	}
	/*
	 * Allocate dmamap.
	 */
	if (bus_dmamap_create(sc->sc_dmatag, MAXPHYS, 1, MAXPHYS,
	    0, BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0) {
		printf(": can't create DMA map\n");
		return;
	}

	sc->sc_rev = DMA_GCSR(sc) & D_DEV_ID;
	id = (sc->sc_rev >> 28) & 0xf;
	printf(": rev %d\n", id);

	/*
	 * Make sure the DMA chip is a supported revision.
	 * The Sun3/80 used only the old rev zero chip,
	 * so the initialization has been simplified.
	 */
	switch (sc->sc_rev) {
	case DMAREV_0:
	case DMAREV_1:
		break;
	default:
		panic("unsupported dma rev");
	}
}

/*
 * This is called by espattach to get our softc.
 */
struct dma_softc *
espdmafind(int unit)
{
	if (unit < 0 || unit >= dma_cd.cd_ndevs ||
	    dma_cd.cd_devs[unit] == NULL)
		panic("no dma");
	return (dma_cd.cd_devs[unit]);
}

#define DMAWAIT(SC, COND, MSG, DONTPANIC) do if (COND) {		\
	int count = 100000;						\
	while ((COND) && --count > 0)					\
		DELAY(5);						\
	if (count == 0) {						\
		printf("%s: line %d: CSR = 0x%x\n",			\
		    __FILE__, __LINE__, DMA_GCSR(SC));			\
		if (DONTPANIC)						\
			printf(MSG);					\
		else							\
			panic(MSG);					\
	}								\
} while (/* CONSTCOND */0)

#define DMA_DRAIN(sc, dontpanic) do {					\
	uint32_t _csr;							\
	/*								\
	 * DMA rev0 & rev1: we are not allowed to touch the DMA "flush" \
	 * and "drain" bits while it is still thinking about a		\
	 * request.							\
	 * other revs: D_R_PEND bit reads as 0				\
	 */								\
	DMAWAIT(sc, DMA_GCSR(sc) & D_R_PEND, "R_PEND", dontpanic);	\
	/*								\
	 * Select drain bit (always rev 0,1)				\
	 * also clears errors and D_TC flag				\
	 */								\
	_csr = DMA_GCSR(sc);						\
	_csr |= D_DRAIN;						\
	DMA_SCSR(sc, _csr);						\
	/*								\
	 * Wait for draining to finish					\
	 */								\
	DMAWAIT(sc, DMA_GCSR(sc) & D_PACKCNT, "DRAINING", dontpanic);	\
} while (/* CONSTCOND */0)

#define DMA_FLUSH(sc, dontpanic) do {					\
	uint32_t _csr;							\
	/*								\
	 * DMA rev0 & rev1: we are not allowed to touch the DMA "flush" \
	 * and "drain" bits while it is still thinking about a		\
	 * request.							\
	 * other revs: D_R_PEND bit reads as 0				\
	 */								\
	DMAWAIT(sc, DMA_GCSR(sc) & D_R_PEND, "R_PEND", dontpanic);	\
	_csr = DMA_GCSR(sc);						\
	_csr &= ~(D_WRITE|D_EN_DMA);					\
	DMA_SCSR(sc, _csr);						\
	_csr |= D_FLUSH;						\
	DMA_SCSR(sc, _csr);						\
} while (/* CONSTCOND */0)

void
dma_reset(struct dma_softc *sc)
{
	uint32_t csr;

	if (sc->sc_dmamap->dm_nsegs > 0)
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);

	DMA_FLUSH(sc, 1);
	csr = DMA_GCSR(sc);

	csr |= D_RESET;			/* reset DMA */
	DMA_SCSR(sc, csr);
	DELAY(200);			/* what should this be ? */

	/*DMAWAIT1(sc); why was this here? */
	csr = DMA_GCSR(sc);
	csr &= ~D_RESET;		/* de-assert reset line */
	DMA_SCSR(sc, csr);
	DELAY(5);			/* allow a few ticks to settle */

	/*
	 * Get transfer burst size from (?) and plug it into the
	 * controller registers. This is needed on the Sun4m...
	 * Do we need it too? Apparently not, because the 3/80
	 * always has the old, REV zero DMA chip.
	 */
	csr = DMA_GCSR(sc);
	csr |= D_INT_EN;		/* enable interrupts */

	DMA_SCSR(sc, csr);

	sc->sc_active = 0;
}


#define DMAMAX(a)	(MAX_DMA_SZ - ((a) & (MAX_DMA_SZ-1)))

/*
 * setup a dma transfer
 */
int
dma_setup(struct dma_softc *sc, void **addr, size_t *len, int datain,
    size_t *dmasize)
{
	uint32_t csr;

	DMA_FLUSH(sc, 0);

#if 0
	DMA_SCSR(sc, DMA_GCSR(sc) & ~D_INT_EN);
#endif
	sc->sc_dmaaddr = addr;
	sc->sc_dmalen = len;

	NCR_DMA(("%s: start %d@%p,%d\n", sc->sc_dev.dv_xname,
	    *sc->sc_dmalen, *sc->sc_dmaaddr, datain ? 1 : 0));

	/*
	 * the rules say we cannot transfer more than the limit
	 * of this DMA chip (64k for old and 16Mb for new),
	 * and we cannot cross a 16Mb boundary.
	 */
	*dmasize = sc->sc_dmasize =
	    min(*dmasize, DMAMAX((size_t) *sc->sc_dmaaddr));

	NCR_DMA(("dma_setup: dmasize = %d\n", sc->sc_dmasize));

	/* Program the DMA address */
	if (sc->sc_dmasize) {
		if (bus_dmamap_load(sc->sc_dmatag, sc->sc_dmamap,
		    *sc->sc_dmaaddr, sc->sc_dmasize,
		    NULL /* kernel address */, BUS_DMA_NOWAIT))
			panic("%s: cannot allocate DVMA address",
			    sc->sc_dev.dv_xname);
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0, sc->sc_dmasize,
		    datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, DMA_REG_ADDR,
		    sc->sc_dmamap->dm_segs[0].ds_addr);
	}

	/* We never have DMAREV_ESC. */

	/* Setup DMA control register */
	csr = DMA_GCSR(sc);
	if (datain)
		csr |= D_WRITE;
	else
		csr &= ~D_WRITE;
	csr |= D_INT_EN;
	DMA_SCSR(sc, csr);

	return 0;
}

/*
 * Pseudo (chained) interrupt from the esp driver to kick the
 * currently running DMA transfer. I am relying on espintr() to
 * pick up and clean errors for now.
 *
 * return 1 if it was a DMA continue.
 */
int
espdmaintr(struct dma_softc *sc)
{
	struct ncr53c9x_softc *nsc = sc->sc_client;
	char bits[64];
	int trans, resid;
	uint32_t csr;

	csr = DMA_GCSR(sc);

	NCR_DMA(("%s: intr: addr 0x%x, csr %s\n",
	    sc->sc_dev.dv_xname, DMADDR(sc),
	    bitmask_snprintf(csr, DMACSRBITS, bits, sizeof(bits))));

	if (csr & D_ERR_PEND) {
		printf("%s: error: csr=%s\n", sc->sc_dev.dv_xname,
		    bitmask_snprintf(csr, DMACSRBITS, bits, sizeof(bits)));
		csr &= ~D_EN_DMA;	/* Stop DMA */
		DMA_SCSR(sc, csr);
		csr |= D_FLUSH;
		DMA_SCSR(sc, csr);
		return -1;
	}

	/* This is an "assertion" :) */
	if (sc->sc_active == 0)
		panic("dmaintr: DMA wasn't active");

	DMA_DRAIN(sc, 0);

	/* DMA has stopped */
	csr &= ~D_EN_DMA;
	DMA_SCSR(sc, csr);
	sc->sc_active = 0;

	if (sc->sc_dmasize == 0) {
		/* A "Transfer Pad" operation completed */
		NCR_DMA(("dmaintr: discarded %d bytes (tcl=%d, tcm=%d)\n",
		    NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8),
		    NCR_READ_REG(nsc, NCR_TCL),
		    NCR_READ_REG(nsc, NCR_TCM)));
		return 0;
	}

	resid = 0;
	/*
	 * If a transfer onto the SCSI bus gets interrupted by the device
	 * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
	 * as residual since the ESP counter registers get decremented as
	 * bytes are clocked into the FIFO.
	 */
	if (!(csr & D_WRITE) &&
	    (resid = (NCR_READ_REG(nsc, NCR_FFLAG) & NCRFIFO_FF)) != 0) {
		NCR_DMA(("dmaintr: empty esp FIFO of %d ", resid));
	}

	if ((nsc->sc_espstat & NCRSTAT_TC) == 0) {
		/*
		 * `Terminal count' is off, so read the residue
		 * out of the ESP counter registers.
		 */
		resid += (NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8) |
		    ((nsc->sc_cfg2 & NCRCFG2_FE)
			? (NCR_READ_REG(nsc, NCR_TCH) << 16)
			: 0));

		if (resid == 0 && sc->sc_dmasize == 65536 &&
		    (nsc->sc_cfg2 & NCRCFG2_FE) == 0)
			/* A transfer of 64K is encoded as `TCL=TCM=0' */
			resid = 65536;
	}

	trans = sc->sc_dmasize - resid;
	if (trans < 0) {		/* transferred < 0 ? */
#if 0
		/*
		 * This situation can happen in perfectly normal operation
		 * if the ESP is reselected while using DMA to select
		 * another target. As such, don't print the warning.
		 */
		printf("%s: xfer (%d) > req (%d)\n",
		    sc->sc_dev.dv_xname, trans, sc->sc_dmasize);
#endif
		trans = sc->sc_dmasize;
	}

	NCR_DMA(("dmaintr: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n",
	    NCR_READ_REG(nsc, NCR_TCL),
	    NCR_READ_REG(nsc, NCR_TCM),
	    (nsc->sc_cfg2 & NCRCFG2_FE)
		? NCR_READ_REG(nsc, NCR_TCH) : 0,
	    trans, resid));

#ifdef SUN3X_470_EVENTUALLY
	if (csr & D_WRITE)
		cache_flush(*sc->sc_dmaaddr, trans);
#endif

	if (sc->sc_dmamap->dm_nsegs > 0) {
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0, sc->sc_dmasize,
		    (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);
	}

	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr = (char *)*sc->sc_dmaaddr + trans;

#if 0	/* this is not normal operation just yet */
	if (*sc->sc_dmalen == 0 ||
	    nsc->sc_phase != nsc->sc_prevphase)
		return 0;

	/* and again */
	dma_start(sc, sc->sc_dmaaddr, sc->sc_dmalen, DMA_GCSR(sc) & D_WRITE);
	return 1;
#endif
	return 0;
}
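
#if 0
/*
 * Illustrative sketch only -- not part of this driver.  It shows, under
 * stated assumptions, roughly how the esp front-end is expected to drive
 * the entry points above: espdmafind() at attach time (see the comment on
 * that function), dma_reset() and dma_setup() around a transfer, and
 * espdmaintr() chained from the ESP interrupt path.  The function name
 * esp_dma_go and the esp_unit/esp_sc arguments are hypothetical
 * placeholders; setting D_EN_DMA and sc_active here is an assumption
 * inferred from espdmaintr(), which expects sc_active to be nonzero and
 * clears D_EN_DMA once the transfer has stopped.
 */
static void
esp_dma_go(int esp_unit, struct ncr53c9x_softc *esp_sc,
    void **addr, size_t *len, int datain)
{
	struct dma_softc *dma;
	size_t dmasize;

	dma = espdmafind(esp_unit);	/* our softc, found by unit number */
	dma->sc_client = esp_sc;	/* espdmaintr() reads the ESP softc */

	dma_reset(dma);			/* quiesce engine, enable interrupts */

	/*
	 * Load the map and program address/direction; the request may be
	 * clipped at a 16MB boundary, so the caller must check dmasize.
	 */
	dmasize = *len;
	dma_setup(dma, addr, len, datain, &dmasize);

	/* Start the engine; the ESP command itself is issued elsewhere. */
	DMA_SCSR(dma, DMA_GCSR(dma) | D_EN_DMA);
	dma->sc_active = 1;

	/* Later, from the ESP interrupt handler: (void)espdmaintr(dma); */
}
#endif	/* 0 */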