/* $NetBSD: pcscp.c,v 1.50 2023/05/17 18:20:30 tsutsui Exp $ */

/*-
 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center; Izumi Tsutsui.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pcscp.c: device dependent code for AMD Am53c974 (PCscsi-PCI)
 * written by Izumi Tsutsui <tsutsui@NetBSD.org>
 *
 * Technical manual available at
 * https://www.amd.com/system/files/TechDocs/19113.pdf
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pcscp.c,v 1.50 2023/05/17 18:20:30 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/buf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/ncr53c9xreg.h>
#include <dev/ic/ncr53c9xvar.h>

#include <dev/pci/pcscpreg.h>

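/*
 * The chip's registers are reached through its first PCI base address
 * register (i/o space), mapped below with pci_mapreg_map().
 */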
#define IO_MAP_REG      0x10

struct pcscp_softc {
        struct ncr53c9x_softc sc_ncr53c9x;      /* glue to MI code */

        bus_space_tag_t sc_st;          /* bus space tag */
        bus_space_handle_t sc_sh;       /* bus space handle */
        void *sc_ih;                    /* interrupt cookie */

        bus_dma_tag_t sc_dmat;          /* DMA tag */

        bus_dmamap_t sc_xfermap;        /* DMA map for transfers */

        uint32_t *sc_mdladdr;           /* MDL array */
        bus_dmamap_t sc_mdldmap;        /* MDL DMA map */

        int sc_active;                  /* DMA state */
        int sc_datain;                  /* DMA Data Direction */
        size_t sc_dmasize;              /* DMA size */
        uint8_t **sc_dmaaddr;           /* DMA address */
        size_t *sc_dmalen;              /* DMA length */
};

#define READ_DMAREG(sc, reg) \
        bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define WRITE_DMAREG(sc, reg, var) \
        bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (var))

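/*
 * The SCSI core (ncr53c9x) registers are laid out at 32-bit intervals
 * within the same i/o window as the DMA engine registers, hence the
 * "(reg) << 2" below; the DMA_* offsets from pcscpreg.h are used as is.
 */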
#define PCSCP_READ_REG(sc, reg) \
        bus_space_read_1((sc)->sc_st, (sc)->sc_sh, (reg) << 2)
#define PCSCP_WRITE_REG(sc, reg, val) \
        bus_space_write_1((sc)->sc_st, (sc)->sc_sh, (reg) << 2, (val))


static int      pcscp_match(device_t, cfdata_t, void *);
static void     pcscp_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(pcscp, sizeof(struct pcscp_softc),
    pcscp_match, pcscp_attach, NULL, NULL);

/*
 * Functions and the switch for the MI code.
 */

static uint8_t  pcscp_read_reg(struct ncr53c9x_softc *, int);
static void     pcscp_write_reg(struct ncr53c9x_softc *, int, uint8_t);
static int      pcscp_dma_isintr(struct ncr53c9x_softc *);
static void     pcscp_dma_reset(struct ncr53c9x_softc *);
static int      pcscp_dma_intr(struct ncr53c9x_softc *);
static int      pcscp_dma_setup(struct ncr53c9x_softc *, uint8_t **, size_t *,
                    int, size_t *);
static void     pcscp_dma_go(struct ncr53c9x_softc *);
static void     pcscp_dma_stop(struct ncr53c9x_softc *);
static int      pcscp_dma_isactive(struct ncr53c9x_softc *);

static struct ncr53c9x_glue pcscp_glue = {
        pcscp_read_reg,
        pcscp_write_reg,
        pcscp_dma_isintr,
        pcscp_dma_reset,
        pcscp_dma_intr,
        pcscp_dma_setup,
        pcscp_dma_go,
        pcscp_dma_stop,
        pcscp_dma_isactive,
        NULL,                   /* gl_clear_latched_intr */
};

static int
pcscp_match(device_t parent, cfdata_t cf, void *aux)
{
        struct pci_attach_args *pa = aux;

        if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_AMD)
                return 0;

        switch (PCI_PRODUCT(pa->pa_id)) {
        case PCI_PRODUCT_AMD_PCSCSI_PCI:
                return 1;
        }
        return 0;
}

/*
 * Attach this instance, and then all the sub-devices
 */
static void
pcscp_attach(device_t parent, device_t self, void *aux)
{
        struct pcscp_softc *esc = device_private(self);
        struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;
        struct pci_attach_args *pa = aux;
        bus_space_tag_t iot;
        bus_space_handle_t ioh;
        pci_intr_handle_t ih;
        const char *intrstr;
        pcireg_t csr;
        bus_dma_segment_t seg;
        int error, rseg;
        char intrbuf[PCI_INTRSTR_LEN];

        sc->sc_dev = self;
        pci_aprint_devinfo(pa, NULL);
        aprint_normal("%s", device_xname(sc->sc_dev));

        if (pci_mapreg_map(pa, IO_MAP_REG, PCI_MAPREG_TYPE_IO, 0,
            &iot, &ioh, NULL, NULL)) {
                aprint_error(": unable to map registers\n");
                return;
        }

        sc->sc_glue = &pcscp_glue;

        esc->sc_st = iot;
        esc->sc_sh = ioh;
        esc->sc_dmat = pa->pa_dmat;

        csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
        pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
            csr | PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE);

        /*
         * XXX More of this should be in ncr53c9x_attach(), but
         * XXX should we really poke around the chip that much in
         * XXX the MI code?  Think about this more...
         */

        /*
         * Set up static configuration info.
         */

        /*
         * XXX should read configuration from EEPROM?
         *
         * The MI ncr53c9x driver does not support per-target
         * configuration, though...
         */
        sc->sc_id = 7;
        sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB;
        sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_FE;
        sc->sc_cfg3 = NCRAMDCFG3_IDM | NCRAMDCFG3_FCLK;
        sc->sc_cfg4 = NCRAMDCFG4_GE12NS | NCRAMDCFG4_RADE;
        sc->sc_rev = NCR_VARIANT_AM53C974;
        sc->sc_features = NCR_F_FASTSCSI;
        sc->sc_cfg3_fscsi = NCRAMDCFG3_FSCSI;
        sc->sc_freq = 40;       /* MHz */

        /*
         * XXX minsync and maxxfer _should_ be set up in MI code,
         * XXX but it appears to have some dependency on what sort
         * XXX of DMA we're hooked up to, etc.
         */

        /*
         * This is the value used to start sync negotiations
         * Note that the NCR register "SYNCTP" is programmed
         * in "clocks per byte", and has a minimum value of 4.
         * The SCSI period used in negotiation is one-fourth
         * of the time (in nanoseconds) needed to transfer one byte.
         * Since the chip's clock is given in MHz, we have the following
         * formula: 4 * period = (1000 / freq) * 4
         */

        sc->sc_minsync = 1000 / sc->sc_freq;
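        /* e.g. with the 40 MHz clock above, this is 1000 / 40 = 25 (ns) */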

        /* Really no limit, but since we want to fit into the (24-bit) TCR... */
        sc->sc_maxxfer = 16 * 1024 * 1024;

        /*
         * Create the DMA maps for the data transfers.
         */

#define MDL_SEG_SIZE    0x1000  /* 4kbyte per segment */
#define MDL_SEG_OFFSET  0x0FFF
#define MDL_SIZE        (MAXPHYS / MDL_SEG_SIZE + 1)    /* no hardware limit? */
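/*
 * e.g. on ports where MAXPHYS is 64kbyte, a transfer that is not page
 * aligned can touch up to 17 4kbyte pages, hence the "+ 1" above.
 */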

        if (bus_dmamap_create(esc->sc_dmat, MAXPHYS, MDL_SIZE, MDL_SEG_SIZE,
            MDL_SEG_SIZE, BUS_DMA_NOWAIT, &esc->sc_xfermap)) {
                aprint_error(": can't create DMA maps\n");
                return;
        }

        /*
         * Allocate and map memory for the MDL.
         */

        if ((error = bus_dmamem_alloc(esc->sc_dmat,
            sizeof(uint32_t) * MDL_SIZE, PAGE_SIZE, 0, &seg, 1, &rseg,
            BUS_DMA_NOWAIT)) != 0) {
                aprint_error(": unable to allocate memory for the MDL,"
                    " error = %d\n", error);
                goto fail_0;
        }
        if ((error = bus_dmamem_map(esc->sc_dmat, &seg, rseg,
            sizeof(uint32_t) * MDL_SIZE, (void **)&esc->sc_mdladdr,
            BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
                aprint_error(": unable to map the MDL memory, error = %d\n",
                    error);
                goto fail_1;
        }
        if ((error = bus_dmamap_create(esc->sc_dmat,
            sizeof(uint32_t) * MDL_SIZE, 1, sizeof(uint32_t) * MDL_SIZE,
            0, BUS_DMA_NOWAIT, &esc->sc_mdldmap)) != 0) {
                aprint_error(": unable to create DMA map for the MDL,"
                    " error = %d\n", error);
                goto fail_2;
        }
        if ((error = bus_dmamap_load(esc->sc_dmat, esc->sc_mdldmap,
            esc->sc_mdladdr, sizeof(uint32_t) * MDL_SIZE,
            NULL, BUS_DMA_NOWAIT)) != 0) {
                aprint_error(": unable to load DMA map for the MDL,"
                    " error = %d\n", error);
                goto fail_3;
        }

        /* map and establish interrupt */
        if (pci_intr_map(pa, &ih)) {
                aprint_error(": couldn't map interrupt\n");
                goto fail_4;
        }

        intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
        esc->sc_ih = pci_intr_establish_xname(pa->pa_pc, ih, IPL_BIO,
            ncr53c9x_intr, esc, device_xname(self));
        if (esc->sc_ih == NULL) {
                aprint_error(": couldn't establish interrupt");
                if (intrstr != NULL)
                        aprint_error(" at %s", intrstr);
                aprint_error("\n");
                goto fail_4;
        }
        if (intrstr != NULL) {
                aprint_normal(": interrupting at %s\n", intrstr);
                aprint_normal("%s", device_xname(sc->sc_dev));
        }

        /* Do the common parts of attachment. */
        sc->sc_adapter.adapt_minphys = minphys;
        sc->sc_adapter.adapt_request = ncr53c9x_scsipi_request;
        ncr53c9x_attach(sc);

        /* Turn on target selection using the `DMA' method */
        sc->sc_features |= NCR_F_DMASELECT;

        return;

 fail_4:
        bus_dmamap_unload(esc->sc_dmat, esc->sc_mdldmap);
 fail_3:
        bus_dmamap_destroy(esc->sc_dmat, esc->sc_mdldmap);
 fail_2:
        bus_dmamem_unmap(esc->sc_dmat, (void *)esc->sc_mdladdr,
            sizeof(uint32_t) * MDL_SIZE);
 fail_1:
        bus_dmamem_free(esc->sc_dmat, &seg, rseg);
 fail_0:
        bus_dmamap_destroy(esc->sc_dmat, esc->sc_xfermap);
}

/*
 * Glue functions.
 */

static uint8_t
pcscp_read_reg(struct ncr53c9x_softc *sc, int reg)
{
        struct pcscp_softc *esc = (struct pcscp_softc *)sc;

        return PCSCP_READ_REG(esc, reg);
}

static void
pcscp_write_reg(struct ncr53c9x_softc *sc, int reg, uint8_t v)
{
        struct pcscp_softc *esc = (struct pcscp_softc *)sc;

        PCSCP_WRITE_REG(esc, reg, v);
}

static int
pcscp_dma_isintr(struct ncr53c9x_softc *sc)
{
        struct pcscp_softc *esc = (struct pcscp_softc *)sc;

        return (PCSCP_READ_REG(esc, NCR_STAT) & NCRSTAT_INT) != 0;
}

static void
pcscp_dma_reset(struct ncr53c9x_softc *sc)
{
        struct pcscp_softc *esc = (struct pcscp_softc *)sc;

        WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE);

        esc->sc_active = 0;
}

static int
pcscp_dma_intr(struct ncr53c9x_softc *sc)
{
        struct pcscp_softc *esc = (struct pcscp_softc *)sc;
        int trans, resid, i;
        bus_dmamap_t dmap = esc->sc_xfermap;
        int datain = esc->sc_datain;
        uint32_t dmastat;
        uint8_t *p = NULL;

        dmastat = READ_DMAREG(esc, DMA_STAT);

        if (dmastat & DMASTAT_ERR) {
                /* XXX not tested... */
                WRITE_DMAREG(esc, DMA_CMD,
                    DMACMD_ABORT | (datain ? DMACMD_DIR : 0));

                printf("%s: error: DMA error detected; Aborting.\n",
                    device_xname(sc->sc_dev));
                bus_dmamap_unload(esc->sc_dmat, dmap);
                return -1;
        }

        if (dmastat & DMASTAT_ABT) {
                /* XXX What should be done? */
                printf("%s: %s: DMA aborted.\n",
                    device_xname(sc->sc_dev), __func__);
                WRITE_DMAREG(esc, DMA_CMD,
                    DMACMD_IDLE | (datain ? DMACMD_DIR : 0));
                esc->sc_active = 0;
                return 0;
        }

#ifdef DIAGNOSTIC
        /* This is an "assertion" :) */
        if (esc->sc_active == 0)
                panic("%s: %s: DMA wasn't active",
                    device_xname(sc->sc_dev), __func__);
#endif

        /* DMA has stopped */

        esc->sc_active = 0;

        if (esc->sc_dmasize == 0) {
                /* A "Transfer Pad" operation completed */
                NCR_DMA(("%s: discarded %d bytes (tcl=%d, tcm=%d)\n",
                    __func__,
                    PCSCP_READ_REG(esc, NCR_TCL) |
                    (PCSCP_READ_REG(esc, NCR_TCM) << 8),
                    PCSCP_READ_REG(esc, NCR_TCL),
                    PCSCP_READ_REG(esc, NCR_TCM)));
                return 0;
        }

        resid = 0;
        /*
         * If a transfer onto the SCSI bus gets interrupted by the device
         * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
         * as residual since the ESP counter registers get decremented as
         * bytes are clocked into the FIFO.
         */
        if (!datain &&
            (resid = (PCSCP_READ_REG(esc, NCR_FFLAG) & NCRFIFO_FF)) != 0) {
                NCR_DMA(("%s: empty esp FIFO of %d ", __func__, resid));
        }

        if ((sc->sc_espstat & NCRSTAT_TC) == 0) {
                /*
                 * `Terminal count' is off, so read the residue
                 * out of the ESP counter registers.
                 */
                if (datain) {
                        resid = PCSCP_READ_REG(esc, NCR_FFLAG) & NCRFIFO_FF;
                        while (resid > 1)
                                resid =
                                    PCSCP_READ_REG(esc, NCR_FFLAG) & NCRFIFO_FF;
                        WRITE_DMAREG(esc, DMA_CMD, DMACMD_BLAST | DMACMD_MDL |
                            (datain ? DMACMD_DIR : 0));

                        for (i = 0; i < 1000; i++) { /* XXX */
                                if (READ_DMAREG(esc, DMA_STAT) & DMASTAT_BCMP)
                                        break;
                                DELAY(1);
                        }

                        /* See the comments below... */
                        if (resid)
                                p = *esc->sc_dmaaddr;
                }

                resid += PCSCP_READ_REG(esc, NCR_TCL) |
                    (PCSCP_READ_REG(esc, NCR_TCM) << 8) |
                    (PCSCP_READ_REG(esc, NCR_TCH) << 16);
        } else {
                while ((dmastat & DMASTAT_DONE) == 0)
                        dmastat = READ_DMAREG(esc, DMA_STAT);
        }

        WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain ? DMACMD_DIR : 0));

        /* sync MDL */
        bus_dmamap_sync(esc->sc_dmat, esc->sc_mdldmap,
            0, sizeof(uint32_t) * dmap->dm_nsegs, BUS_DMASYNC_POSTWRITE);
        /* sync transfer buffer */
        bus_dmamap_sync(esc->sc_dmat, dmap, 0, dmap->dm_mapsize,
            datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(esc->sc_dmat, dmap);

        trans = esc->sc_dmasize - resid;

        /*
         * From the technical manual notes:
         *
         * `In some odd byte conditions, one residual byte will be left
         *  in the SCSI FIFO, and the FIFO flags will never count to 0.
         *  When this happens, the residual byte should be retrieved
         *  via PIO following completion of the BLAST operation.'
         */

        if (p) {
                p += trans;
                *p = PCSCP_READ_REG(esc, NCR_FIFO);
                trans++;
        }

        if (trans < 0) {                /* transferred < 0 ? */
#if 0
                /*
                 * This situation can happen in perfectly normal operation
                 * if the ESP is reselected while using DMA to select
                 * another target.  As such, don't print the warning.
                 */
                printf("%s: xfer (%d) > req (%d)\n",
                    device_xname(sc->sc_dev), trans, esc->sc_dmasize);
#endif
                trans = esc->sc_dmasize;
        }

        NCR_DMA(("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n",
            __func__,
            PCSCP_READ_REG(esc, NCR_TCL),
            PCSCP_READ_REG(esc, NCR_TCM),
            PCSCP_READ_REG(esc, NCR_TCH),
            trans, resid));

        *esc->sc_dmalen -= trans;
        *esc->sc_dmaaddr += trans;

        return 0;
}

static int
pcscp_dma_setup(struct ncr53c9x_softc *sc, uint8_t **addr, size_t *len,
    int datain, size_t *dmasize)
{
        struct pcscp_softc *esc = (struct pcscp_softc *)sc;
        bus_dmamap_t dmap = esc->sc_xfermap;
        uint32_t *mdl;
        int error, nseg, seg;
        bus_addr_t s_offset, s_addr;

        WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain ? DMACMD_DIR : 0));

        esc->sc_dmaaddr = addr;
        esc->sc_dmalen = len;
        esc->sc_dmasize = *dmasize;
        esc->sc_datain = datain;

#ifdef DIAGNOSTIC
        if ((*dmasize / MDL_SEG_SIZE) > MDL_SIZE)
                panic("%s: transfer size too large", device_xname(sc->sc_dev));
#endif

        /*
         * No need to set up DMA in `Transfer Pad' operation.
         * (case of *dmasize == 0)
         */
        if (*dmasize == 0)
                return 0;

        error = bus_dmamap_load(esc->sc_dmat, dmap, *esc->sc_dmaaddr,
            *esc->sc_dmalen, NULL,
            ((sc->sc_nexus->xs->xs_control & XS_CTL_NOSLEEP) ?
             BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
            ((sc->sc_nexus->xs->xs_control & XS_CTL_DATA_IN) ?
             BUS_DMA_READ : BUS_DMA_WRITE));
        if (error) {
                printf("%s: unable to load dmamap, error = %d\n",
                    device_xname(sc->sc_dev), error);
                return error;
        }

        /* set transfer length */
        WRITE_DMAREG(esc, DMA_STC, *dmasize);

        /* set up MDL */
        mdl = esc->sc_mdladdr;
        nseg = dmap->dm_nsegs;

        /* the first segment may not be aligned on a 4k MDL boundary */
        s_addr = dmap->dm_segs[0].ds_addr;
        s_offset = s_addr & MDL_SEG_OFFSET;
        s_addr -= s_offset;

        /* set the first MDL and offset */
        WRITE_DMAREG(esc, DMA_SPA, s_offset);
        *mdl++ = htole32(s_addr);

        /* the remaining dmamap segments are aligned on 4k boundaries */
        for (seg = 1; seg < nseg; seg++)
                *mdl++ = htole32(dmap->dm_segs[seg].ds_addr);

        return 0;
}

static void
pcscp_dma_go(struct ncr53c9x_softc *sc)
{
        struct pcscp_softc *esc = (struct pcscp_softc *)sc;
        bus_dmamap_t dmap = esc->sc_xfermap, mdldmap = esc->sc_mdldmap;
        int datain = esc->sc_datain;

        /* No DMA transfer in Transfer Pad operation */
        if (esc->sc_dmasize == 0)
                return;

        /* sync transfer buffer */
        bus_dmamap_sync(esc->sc_dmat, dmap, 0, dmap->dm_mapsize,
            datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

        /* sync MDL */
        bus_dmamap_sync(esc->sc_dmat, mdldmap,
            0, sizeof(uint32_t) * dmap->dm_nsegs, BUS_DMASYNC_PREWRITE);

        /* set Starting MDL Address */
        WRITE_DMAREG(esc, DMA_SMDLA, mdldmap->dm_segs[0].ds_addr);

        /* set DMA command register bits */
        /* XXX DMA Transfer Interrupt Enable bit is broken? */
        WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | DMACMD_MDL |
            /* DMACMD_INTE | */
            (datain ? DMACMD_DIR : 0));

        /* issue DMA start command */
        WRITE_DMAREG(esc, DMA_CMD, DMACMD_START | DMACMD_MDL |
            /* DMACMD_INTE | */
            (datain ? DMACMD_DIR : 0));

        esc->sc_active = 1;
}

static void
pcscp_dma_stop(struct ncr53c9x_softc *sc)
{
        struct pcscp_softc *esc = (struct pcscp_softc *)sc;

        /* DMA stop */
        /* XXX What should we do here? */
        WRITE_DMAREG(esc, DMA_CMD,
            DMACMD_ABORT | (esc->sc_datain ? DMACMD_DIR : 0));
        bus_dmamap_unload(esc->sc_dmat, esc->sc_xfermap);

        esc->sc_active = 0;
}

static int
pcscp_dma_isactive(struct ncr53c9x_softc *sc)
{
        struct pcscp_softc *esc = (struct pcscp_softc *)sc;

        /* XXX should check esc->sc_active? */
        if ((READ_DMAREG(esc, DMA_CMD) & DMACMD_CMD) != DMACMD_IDLE)
                return 1;
        return 0;
}