1 /* $NetBSD: pcscp.c,v 1.16 2001/07/19 16:36:16 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center; Izumi Tsutsui. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * pcscp.c: device dependent code for AMD Am53c974 (PCscsi-PCI) 42 * written by Izumi Tsutsui <tsutsui@ceres.dti.ne.jp> 43 * 44 * Technical manual available at 45 * http://www.amd.com/products/npd/techdocs/techdocs.html 46 */ 47 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/device.h> 51 #include <sys/buf.h> 52 53 #include <machine/bus.h> 54 #include <machine/intr.h> 55 #include <machine/endian.h> 56 57 #include <uvm/uvm_extern.h> 58 59 #include <dev/scsipi/scsipi_all.h> 60 #include <dev/scsipi/scsi_all.h> 61 #include <dev/scsipi/scsiconf.h> 62 63 #include <dev/pci/pcireg.h> 64 #include <dev/pci/pcivar.h> 65 #include <dev/pci/pcidevs.h> 66 67 #include <dev/ic/ncr53c9xreg.h> 68 #include <dev/ic/ncr53c9xvar.h> 69 70 #include <dev/pci/pcscpreg.h> 71 72 #define IO_MAP_REG 0x10 73 #define MEM_MAP_REG 0x14 74 75 struct pcscp_softc { 76 struct ncr53c9x_softc sc_ncr53c9x; /* glue to MI code */ 77 78 bus_space_tag_t sc_st; /* bus space tag */ 79 bus_space_handle_t sc_sh; /* bus space handle */ 80 void *sc_ih; /* interrupt cookie */ 81 82 bus_dma_tag_t sc_dmat; /* DMA tag */ 83 84 bus_dmamap_t sc_xfermap; /* DMA map for transfers */ 85 86 u_int32_t *sc_mdladdr; /* MDL array */ 87 bus_dmamap_t sc_mdldmap; /* MDL DMA map */ 88 89 int sc_active; /* DMA state */ 90 int sc_datain; /* DMA Data Direction */ 91 size_t sc_dmasize; /* DMA size */ 92 char **sc_dmaaddr; /* DMA address */ 93 size_t *sc_dmalen; /* DMA length */ 94 }; 95 96 #define READ_DMAREG(sc, reg) \ 97 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) 98 #define WRITE_DMAREG(sc, reg, var) \ 99 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (var)) 100 101 /* don't have to use MI defines in MD code... */ 102 #undef NCR_READ_REG 103 #define NCR_READ_REG(sc, reg) pcscp_read_reg((sc), (reg)) 104 #undef NCR_WRITE_REG 105 #define NCR_WRITE_REG(sc, reg, val) pcscp_write_reg((sc), (reg), (val)) 106 107 int pcscp_match __P((struct device *, struct cfdata *, void *)); 108 void pcscp_attach __P((struct device *, struct device *, void *)); 109 110 struct cfattach pcscp_ca = { 111 sizeof(struct pcscp_softc), pcscp_match, pcscp_attach 112 }; 113 114 /* 115 * Functions and the switch for the MI code. 116 */ 117 118 u_char pcscp_read_reg __P((struct ncr53c9x_softc *, int)); 119 void pcscp_write_reg __P((struct ncr53c9x_softc *, int, u_char)); 120 int pcscp_dma_isintr __P((struct ncr53c9x_softc *)); 121 void pcscp_dma_reset __P((struct ncr53c9x_softc *)); 122 int pcscp_dma_intr __P((struct ncr53c9x_softc *)); 123 int pcscp_dma_setup __P((struct ncr53c9x_softc *, caddr_t *, 124 size_t *, int, size_t *)); 125 void pcscp_dma_go __P((struct ncr53c9x_softc *)); 126 void pcscp_dma_stop __P((struct ncr53c9x_softc *)); 127 int pcscp_dma_isactive __P((struct ncr53c9x_softc *)); 128 129 struct ncr53c9x_glue pcscp_glue = { 130 pcscp_read_reg, 131 pcscp_write_reg, 132 pcscp_dma_isintr, 133 pcscp_dma_reset, 134 pcscp_dma_intr, 135 pcscp_dma_setup, 136 pcscp_dma_go, 137 pcscp_dma_stop, 138 pcscp_dma_isactive, 139 NULL, /* gl_clear_latched_intr */ 140 }; 141 142 int 143 pcscp_match(parent, match, aux) 144 struct device *parent; 145 struct cfdata *match; 146 void *aux; 147 { 148 struct pci_attach_args *pa = aux; 149 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_AMD) 150 return 0; 151 152 switch (PCI_PRODUCT(pa->pa_id)) { 153 case PCI_PRODUCT_AMD_PCSCSI_PCI: 154 #if 0 155 case PCI_PRODUCT_AMD_PCNETS_PCI: 156 #endif 157 return 1; 158 } 159 return 0; 160 } 161 162 /* 163 * Attach this instance, and then all the sub-devices 164 */ 165 void 166 pcscp_attach(parent, self, aux) 167 struct device *parent, *self; 168 void *aux; 169 { 170 struct pci_attach_args *pa = aux; 171 struct pcscp_softc *esc = (void *)self; 172 struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x; 173 bus_space_tag_t st, iot, memt; 174 bus_space_handle_t sh, ioh, memh; 175 int ioh_valid, memh_valid; 176 pci_intr_handle_t ih; 177 const char *intrstr; 178 pcireg_t csr; 179 bus_dma_segment_t seg; 180 int error, rseg; 181 182 ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG, 183 PCI_MAPREG_TYPE_IO, 0, 184 &iot, &ioh, NULL, NULL) == 0); 185 #if 0 /* XXX cannot use memory map? */ 186 memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG, 187 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 188 &memt, &memh, NULL, NULL) == 0); 189 #else 190 memh_valid = 0; 191 #endif 192 193 if (memh_valid) { 194 st = memt; 195 sh = memh; 196 } else if (ioh_valid) { 197 st = iot; 198 sh = ioh; 199 } else { 200 printf(": unable to map registers\n"); 201 return; 202 } 203 printf("\n"); 204 205 sc->sc_glue = &pcscp_glue; 206 207 esc->sc_st = st; 208 esc->sc_sh = sh; 209 esc->sc_dmat = pa->pa_dmat; 210 211 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 212 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 213 csr | PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE); 214 215 /* 216 * XXX More of this should be in ncr53c9x_attach(), but 217 * XXX should we really poke around the chip that much in 218 * XXX the MI code? Think about this more... 219 */ 220 221 /* 222 * Set up static configuration info. 223 */ 224 225 /* 226 * XXX should read configuration from EEPROM? 227 * 228 * MI ncr53c9x driver does not support configuration 229 * per each target device, though... 230 */ 231 sc->sc_id = 7; 232 sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB; 233 sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_FE; 234 sc->sc_cfg3 = NCRAMDCFG3_IDM | NCRAMDCFG3_FCLK; 235 sc->sc_cfg4 = NCRAMDCFG4_GE12NS | NCRAMDCFG4_RADE; 236 sc->sc_rev = NCR_VARIANT_AM53C974; 237 sc->sc_features = NCR_F_FASTSCSI; 238 sc->sc_cfg3_fscsi = NCRAMDCFG3_FSCSI; 239 sc->sc_freq = 40; /* MHz */ 240 241 /* 242 * XXX minsync and maxxfer _should_ be set up in MI code, 243 * XXX but it appears to have some dependency on what sort 244 * XXX of DMA we're hooked up to, etc. 245 */ 246 247 /* 248 * This is the value used to start sync negotiations 249 * Note that the NCR register "SYNCTP" is programmed 250 * in "clocks per byte", and has a minimum value of 4. 251 * The SCSI period used in negotiation is one-fourth 252 * of the time (in nanoseconds) needed to transfer one byte. 253 * Since the chip's clock is given in MHz, we have the following 254 * formula: 4 * period = (1000 / freq) * 4 255 */ 256 257 sc->sc_minsync = 1000 / sc->sc_freq; 258 259 /* Really no limit, but since we want to fit into the TCR... */ 260 sc->sc_maxxfer = 16 * 1024 * 1024; 261 262 /* map and establish interrupt */ 263 if (pci_intr_map(pa, &ih)) { 264 printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname); 265 return; 266 } 267 268 intrstr = pci_intr_string(pa->pa_pc, ih); 269 esc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO, 270 ncr53c9x_intr, esc); 271 if (esc->sc_ih == NULL) { 272 printf("%s: couldn't establish interrupt", sc->sc_dev.dv_xname); 273 if (intrstr != NULL) 274 printf(" at %s", intrstr); 275 printf("\n"); 276 return; 277 } 278 if (intrstr != NULL) 279 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, 280 intrstr); 281 282 /* 283 * Create the DMA maps for the data transfers. 284 */ 285 286 #define MDL_SEG_SIZE 0x1000 /* 4kbyte per segment */ 287 #define MDL_SEG_OFFSET 0x0FFF 288 #define MDL_SIZE (MAXPHYS / MDL_SEG_SIZE + 1) /* no hardware limit? */ 289 290 if (bus_dmamap_create(esc->sc_dmat, MAXPHYS, MDL_SIZE, MAXPHYS, 0, 291 BUS_DMA_NOWAIT, &esc->sc_xfermap)) { 292 printf("%s: can't create dma maps\n", sc->sc_dev.dv_xname); 293 return; 294 } 295 296 /* 297 * Allocate and map memory for the MDL. 298 */ 299 300 if ((error = bus_dmamem_alloc(esc->sc_dmat, 301 sizeof(u_int32_t) * MDL_SIZE, PAGE_SIZE, 0, &seg, 1, &rseg, 302 BUS_DMA_NOWAIT)) != 0) { 303 printf("%s: unable to allocate memory for the MDL, " 304 "error = %d\n", sc->sc_dev.dv_xname, error); 305 return; 306 } 307 if ((error = bus_dmamem_map(esc->sc_dmat, &seg, rseg, 308 sizeof(u_int32_t) * MDL_SIZE , (caddr_t *)&esc->sc_mdladdr, 309 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 310 printf("%s: unable to map the MDL memory, error = %d\n", 311 sc->sc_dev.dv_xname, error); 312 return; 313 } 314 if ((error = bus_dmamap_create(esc->sc_dmat, 315 sizeof(u_int32_t) * MDL_SIZE, 1, sizeof(u_int32_t) * MDL_SIZE, 316 0, BUS_DMA_NOWAIT, &esc->sc_mdldmap)) != 0) { 317 printf("%s: unable to map_create for the MDL, error = %d\n", 318 sc->sc_dev.dv_xname, error); 319 return; 320 } 321 if ((error = bus_dmamap_load(esc->sc_dmat, esc->sc_mdldmap, 322 esc->sc_mdladdr, sizeof(u_int32_t) * MDL_SIZE, 323 NULL, BUS_DMA_NOWAIT)) != 0) { 324 printf("%s: unable to load for the MDL, error = %d\n", 325 sc->sc_dev.dv_xname, error); 326 return; 327 } 328 329 /* Do the common parts of attachment. */ 330 printf("%s", sc->sc_dev.dv_xname); 331 332 sc->sc_adapter.adapt_minphys = minphys; 333 sc->sc_adapter.adapt_request = ncr53c9x_scsipi_request; 334 ncr53c9x_attach(sc); 335 336 /* Turn on target selection using the `dma' method */ 337 sc->sc_features |= NCR_F_DMASELECT; 338 } 339 340 /* 341 * Glue functions. 342 */ 343 344 u_char 345 pcscp_read_reg(sc, reg) 346 struct ncr53c9x_softc *sc; 347 int reg; 348 { 349 struct pcscp_softc *esc = (struct pcscp_softc *)sc; 350 351 return bus_space_read_1(esc->sc_st, esc->sc_sh, reg << 2); 352 } 353 354 void 355 pcscp_write_reg(sc, reg, v) 356 struct ncr53c9x_softc *sc; 357 int reg; 358 u_char v; 359 { 360 struct pcscp_softc *esc = (struct pcscp_softc *)sc; 361 362 bus_space_write_1(esc->sc_st, esc->sc_sh, reg << 2, v); 363 } 364 365 int 366 pcscp_dma_isintr(sc) 367 struct ncr53c9x_softc *sc; 368 { 369 370 return NCR_READ_REG(sc, NCR_STAT) & NCRSTAT_INT; 371 } 372 373 void 374 pcscp_dma_reset(sc) 375 struct ncr53c9x_softc *sc; 376 { 377 struct pcscp_softc *esc = (struct pcscp_softc *)sc; 378 379 WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE); 380 381 esc->sc_active = 0; 382 } 383 384 int 385 pcscp_dma_intr(sc) 386 struct ncr53c9x_softc *sc; 387 { 388 struct pcscp_softc *esc = (struct pcscp_softc *)sc; 389 int trans, resid, i; 390 bus_dmamap_t dmap = esc->sc_xfermap; 391 int datain = esc->sc_datain; 392 u_int32_t dmastat; 393 char *p = NULL; 394 395 dmastat = READ_DMAREG(esc, DMA_STAT); 396 397 if (dmastat & DMASTAT_ERR) { 398 /* XXX not tested... */ 399 WRITE_DMAREG(esc, DMA_CMD, 400 DMACMD_ABORT | (datain ? DMACMD_DIR : 0)); 401 402 printf("%s: error: DMA error detected; Aborting.\n", 403 sc->sc_dev.dv_xname); 404 bus_dmamap_unload(esc->sc_dmat, dmap); 405 return -1; 406 } 407 408 if (dmastat & DMASTAT_ABT) { 409 /* XXX What should be done? */ 410 printf("%s: dma_intr: DMA aborted.\n", sc->sc_dev.dv_xname); 411 WRITE_DMAREG(esc, DMA_CMD, 412 DMACMD_IDLE | (datain ? DMACMD_DIR : 0)); 413 esc->sc_active = 0; 414 return 0; 415 } 416 417 /* This is an "assertion" :) */ 418 if (esc->sc_active == 0) 419 panic("pcscp dmaintr: DMA wasn't active"); 420 421 /* DMA has stopped */ 422 423 esc->sc_active = 0; 424 425 if (esc->sc_dmasize == 0) { 426 /* A "Transfer Pad" operation completed */ 427 NCR_DMA(("dmaintr: discarded %d bytes (tcl=%d, tcm=%d)\n", 428 NCR_READ_REG(sc, NCR_TCL) | 429 (NCR_READ_REG(sc, NCR_TCM) << 8), 430 NCR_READ_REG(sc, NCR_TCL), 431 NCR_READ_REG(sc, NCR_TCM))); 432 return 0; 433 } 434 435 resid = 0; 436 /* 437 * If a transfer onto the SCSI bus gets interrupted by the device 438 * (e.g. for a SAVEPOINTER message), the data in the FIFO counts 439 * as residual since the ESP counter registers get decremented as 440 * bytes are clocked into the FIFO. 441 */ 442 if (!datain && 443 (resid = (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF)) != 0) { 444 NCR_DMA(("pcscp_dma_intr: empty esp FIFO of %d ", resid)); 445 } 446 447 if ((sc->sc_espstat & NCRSTAT_TC) == 0) { 448 /* 449 * `Terminal count' is off, so read the residue 450 * out of the ESP counter registers. 451 */ 452 if (datain) { 453 resid = NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF; 454 while (resid > 1) 455 resid = 456 NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF; 457 WRITE_DMAREG(esc, DMA_CMD, DMACMD_BLAST | DMACMD_MDL | 458 (datain ? DMACMD_DIR : 0)); 459 460 for (i = 0; i < 0x8000; i++) /* XXX 0x8000 ? */ 461 if (READ_DMAREG(esc, DMA_STAT) & DMASTAT_BCMP) 462 break; 463 464 /* See the below comments... */ 465 if (resid) 466 p = *esc->sc_dmaaddr; 467 } 468 469 resid += (NCR_READ_REG(sc, NCR_TCL) | 470 (NCR_READ_REG(sc, NCR_TCM) << 8) | 471 ((sc->sc_cfg2 & NCRCFG2_FE) 472 ? (NCR_READ_REG(sc, NCR_TCH) << 16) : 0)); 473 474 if (resid == 0 && esc->sc_dmasize == 65536 && 475 (sc->sc_cfg2 & NCRCFG2_FE) == 0) 476 /* A transfer of 64K is encoded as `TCL=TCM=0' */ 477 resid = 65536; 478 } else { 479 while((dmastat & DMASTAT_DONE) == 0) 480 dmastat = READ_DMAREG(esc, DMA_STAT); 481 } 482 483 WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain ? DMACMD_DIR : 0)); 484 485 bus_dmamap_sync(esc->sc_dmat, dmap, 0, dmap->dm_mapsize, 486 datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 487 bus_dmamap_unload(esc->sc_dmat, dmap); 488 489 trans = esc->sc_dmasize - resid; 490 491 /* 492 * From the technical manual notes: 493 * 494 * `In some odd byte conditions, one residual byte will be left 495 * in the SCSI FIFO, and the FIFO flags will never count to 0. 496 * When this happens, the residual byte should be retrieved 497 * via PIO following completion of the BLAST operation.' 498 */ 499 500 if (p) { 501 p += trans; 502 *p = NCR_READ_REG(sc, NCR_FIFO); 503 trans++; 504 } 505 506 if (trans < 0) { /* transferred < 0 ? */ 507 #if 0 508 /* 509 * This situation can happen in perfectly normal operation 510 * if the ESP is reselected while using DMA to select 511 * another target. As such, don't print the warning. 512 */ 513 printf("%s: xfer (%d) > req (%d)\n", 514 sc->sc_dev.dv_xname, trans, esc->sc_dmasize); 515 #endif 516 trans = esc->sc_dmasize; 517 } 518 519 NCR_DMA(("dmaintr: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n", 520 NCR_READ_REG(sc, NCR_TCL), 521 NCR_READ_REG(sc, NCR_TCM), 522 (sc->sc_cfg2 & NCRCFG2_FE) ? NCR_READ_REG(sc, NCR_TCH) : 0, 523 trans, resid)); 524 525 *esc->sc_dmalen -= trans; 526 *esc->sc_dmaaddr += trans; 527 528 return 0; 529 } 530 531 int 532 pcscp_dma_setup(sc, addr, len, datain, dmasize) 533 struct ncr53c9x_softc *sc; 534 caddr_t *addr; 535 size_t *len; 536 int datain; 537 size_t *dmasize; 538 { 539 struct pcscp_softc *esc = (struct pcscp_softc *)sc; 540 bus_dmamap_t dmap = esc->sc_xfermap; 541 u_int32_t *mdl; 542 int error, nseg, seg; 543 bus_addr_t s_offset, s_addr; 544 long rest, count; 545 546 WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain ? DMACMD_DIR : 0)); 547 548 esc->sc_dmaaddr = addr; 549 esc->sc_dmalen = len; 550 esc->sc_dmasize = *dmasize; 551 esc->sc_datain = datain; 552 553 #ifdef DIAGNOSTIC 554 if ((*dmasize / MDL_SEG_SIZE) > MDL_SIZE) 555 panic("pcscp: transfer size too large"); 556 #endif 557 558 /* 559 * No need to set up DMA in `Transfer Pad' operation. 560 * (case of *dmasize == 0) 561 */ 562 if (*dmasize == 0) 563 return 0; 564 565 error = bus_dmamap_load(esc->sc_dmat, dmap, *esc->sc_dmaaddr, 566 *esc->sc_dmalen, NULL, 567 ((sc->sc_nexus->xs->xs_control & XS_CTL_NOSLEEP) ? 568 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING | 569 ((sc->sc_nexus->xs->xs_control & XS_CTL_DATA_IN) ? 570 BUS_DMA_READ : BUS_DMA_WRITE)); 571 if (error) { 572 printf("%s: unable to load dmamap, error = %d\n", 573 sc->sc_dev.dv_xname, error); 574 return error; 575 } 576 577 /* set transfer length */ 578 WRITE_DMAREG(esc, DMA_STC, *dmasize); 579 580 /* set up MDL */ 581 mdl = esc->sc_mdladdr; 582 nseg = dmap->dm_nsegs; 583 584 /* the first segment is possibly not aligned with 4k MDL boundary */ 585 count = dmap->dm_segs[0].ds_len; 586 s_addr = dmap->dm_segs[0].ds_addr; 587 s_offset = s_addr & MDL_SEG_OFFSET; 588 s_addr -= s_offset; 589 rest = MDL_SEG_SIZE - s_offset; 590 591 /* set the first MDL and offset */ 592 WRITE_DMAREG(esc, DMA_SPA, s_offset); 593 *mdl++ = htole32(s_addr); 594 count -= rest; 595 596 /* rests of the first dmamap segment */ 597 while (count > 0) { 598 s_addr += MDL_SEG_SIZE; 599 *mdl++ = htole32(s_addr); 600 count -= MDL_SEG_SIZE; 601 } 602 603 /* the rest dmamap segments are aligned with 4k boundary */ 604 for (seg = 1; seg < nseg; seg++) { 605 count = dmap->dm_segs[seg].ds_len; 606 s_addr = dmap->dm_segs[seg].ds_addr; 607 608 /* first 4kbyte of each dmamap segment */ 609 *mdl++ = htole32(s_addr); 610 count -= MDL_SEG_SIZE; 611 612 /* trailing contiguous 4k frames of each dmamap segments */ 613 while (count > 0) { 614 s_addr += MDL_SEG_SIZE; 615 *mdl++ = htole32(s_addr); 616 count -= MDL_SEG_SIZE; 617 } 618 } 619 620 return 0; 621 } 622 623 void 624 pcscp_dma_go(sc) 625 struct ncr53c9x_softc *sc; 626 { 627 struct pcscp_softc *esc = (struct pcscp_softc *)sc; 628 bus_dmamap_t dmap = esc->sc_xfermap, mdldmap = esc->sc_mdldmap; 629 int datain = esc->sc_datain; 630 631 /* No DMA transfer in Transfer Pad operation */ 632 if (esc->sc_dmasize == 0) 633 return; 634 635 /* sync transfer buffer */ 636 bus_dmamap_sync(esc->sc_dmat, dmap, 0, dmap->dm_mapsize, 637 datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 638 639 /* sync MDL */ 640 bus_dmamap_sync(esc->sc_dmat, mdldmap, 0, mdldmap->dm_mapsize, 641 BUS_DMASYNC_PREWRITE); 642 643 /* set Starting MDL Address */ 644 WRITE_DMAREG(esc, DMA_SMDLA, mdldmap->dm_segs[0].ds_addr); 645 646 /* set DMA command register bits */ 647 /* XXX DMA Transfer Interrupt Enable bit is broken? */ 648 WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | DMACMD_MDL | 649 /* DMACMD_INTE | */ 650 (datain ? DMACMD_DIR : 0)); 651 652 /* issue DMA start command */ 653 WRITE_DMAREG(esc, DMA_CMD, DMACMD_START | DMACMD_MDL | 654 /* DMACMD_INTE | */ 655 (datain ? DMACMD_DIR : 0)); 656 657 esc->sc_active = 1; 658 } 659 660 void 661 pcscp_dma_stop(sc) 662 struct ncr53c9x_softc *sc; 663 { 664 struct pcscp_softc *esc = (struct pcscp_softc *)sc; 665 666 /* dma stop */ 667 /* XXX What should we do here ? */ 668 WRITE_DMAREG(esc, DMA_CMD, 669 DMACMD_ABORT | (esc->sc_datain ? DMACMD_DIR : 0)); 670 671 esc->sc_active = 0; 672 } 673 674 int 675 pcscp_dma_isactive(sc) 676 struct ncr53c9x_softc *sc; 677 { 678 struct pcscp_softc *esc = (struct pcscp_softc *)sc; 679 680 /* XXX should check esc->sc_active? */ 681 if ((READ_DMAREG(esc, DMA_CMD) & DMACMD_CMD) != DMACMD_IDLE) 682 return 1; 683 return 0; 684 } 685