/*	$NetBSD: dma.c,v 1.23 2018/09/03 16:29:28 riastradh Exp $ */

/*
 * Copyright (c) 1994 Paul Kranenburg.  All rights reserved.
 * Copyright (c) 1994 Peter Galbavy.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Peter Galbavy.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.23 2018/09/03 16:29:28 riastradh Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <machine/autoconf.h>
#include <machine/dvma.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/ncr53c9xreg.h>
#include <dev/ic/ncr53c9xvar.h>

#include <sun3/dev/dmareg.h>
#include <sun3/dev/dmavar.h>

#include "ioconf.h"

#define MAX_DMA_SZ	0x01000000	/* 16MB */

static int	dmamatch(device_t, cfdata_t, void *);
static void	dmaattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

static int
dmamatch(device_t parent, cfdata_t cf, void *aux)
{
	struct confargs *ca = aux;

	/*
	 * Check for the DMA registers.
	 */
	if (bus_peek(ca->ca_bustype, ca->ca_paddr, 4) == -1)
		return 0;

	/* If default ipl, fill it in. */
	if (ca->ca_intpri == -1)
		ca->ca_intpri = 2;

	return 1;
}

static void
dmaattach(device_t parent, device_t self, void *aux)
{
	struct dma_softc *sc = device_private(self);
	struct confargs *ca = aux;
	int id;

	sc->sc_dev = self;

#if 0
	/* indirect functions */
	sc->intr = espdmaintr;
	sc->setup = dma_setup;
	sc->reset = dma_reset;
#endif

	/*
	 * Map in the registers.
	 */
	sc->sc_bst = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;
	if (bus_space_map(sc->sc_bst, ca->ca_paddr, DMAREG_SIZE,
	    0, &sc->sc_bsh) != 0) {
		aprint_error(": can't map register\n");
		return;
	}
	/*
	 * Allocate dmamap.
	 */
	if (bus_dmamap_create(sc->sc_dmatag, MAXPHYS, 1, MAXPHYS,
	    0, BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0) {
		aprint_error(": can't create DMA map\n");
		return;
	}

	sc->sc_rev = DMA_GCSR(sc) & D_DEV_ID;
	id = (sc->sc_rev >> 28) & 0xf;
	aprint_normal(": rev %d\n", id);
	/*
	 * Make sure the DMA chip is a supported revision.
	 * The Sun3/80 used only the old rev zero chip,
	 * so the initialization has been simplified.
	 */
	switch (sc->sc_rev) {
	case DMAREV_0:
	case DMAREV_1:
		break;
	default:
		panic("unsupported dma rev");
	}
}

/*
 * This is called by espattach to get our softc.
 */
struct dma_softc *
espdmafind(int unit)
{
	struct dma_softc *dma;

	dma = device_lookup_private(&dma_cd, unit);
	if (dma == NULL)
		panic("%s: no dma", __func__);
	return dma;
}
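
/*
 * Illustrative only (not part of this driver): the esp front end is
 * expected to look up its DMA engine along the lines of
 *
 *	esc->sc_dma = espdmafind(device_unit(self));
 *
 * where "esc" names a hypothetical esp glue softc; the real call site
 * lives in esp.c and may differ.
 */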

#define DMAWAIT(SC, COND, MSG, DONTPANIC) do if (COND) {		\
	int count = 100000;						\
	while ((COND) && --count > 0)					\
		DELAY(5);						\
	if (count == 0) {						\
		printf("%s: line %d: CSR = 0x%x\n",			\
			__FILE__, __LINE__, DMA_GCSR(SC));		\
		if (DONTPANIC)						\
			printf(MSG);					\
		else							\
			panic(MSG);					\
	}								\
} while (/* CONSTCOND */0)
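
/*
 * Timing note: the wait above gives up after about 100000 iterations of
 * DELAY(5), i.e. roughly half a second of polling, before dumping the
 * CSR and letting DONTPANIC choose between a printf() and a panic().
 */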

#define DMA_DRAIN(sc, dontpanic) do {					\
	uint32_t _csr;							\
	/*								\
	 * DMA rev0 & rev1: we are not allowed to touch the DMA "flush"	\
	 *     and "drain" bits while it is still thinking about a	\
	 *     request.							\
	 * other revs: D_R_PEND bit reads as 0				\
	 */								\
	DMAWAIT(sc, DMA_GCSR(sc) & D_R_PEND, "R_PEND", dontpanic);	\
	/*								\
	 * Select drain bit (always rev 0,1)				\
	 * also clears errors and D_TC flag				\
	 */								\
	_csr = DMA_GCSR(sc);						\
	_csr |= D_DRAIN;						\
	DMA_SCSR(sc, _csr);						\
	/*								\
	 * Wait for draining to finish					\
	 */								\
	DMAWAIT(sc, DMA_GCSR(sc) & D_PACKCNT, "DRAINING", dontpanic);	\
} while (/* CONSTCOND */0)

#define DMA_FLUSH(sc, dontpanic) do {					\
	uint32_t _csr;							\
	/*								\
	 * DMA rev0 & rev1: we are not allowed to touch the DMA "flush"	\
	 *     and "drain" bits while it is still thinking about a	\
	 *     request.							\
	 * other revs: D_R_PEND bit reads as 0				\
	 */								\
	DMAWAIT(sc, DMA_GCSR(sc) & D_R_PEND, "R_PEND", dontpanic);	\
	_csr = DMA_GCSR(sc);						\
	_csr &= ~(D_WRITE|D_EN_DMA);					\
	DMA_SCSR(sc, _csr);						\
	_csr |= D_FLUSH;						\
	DMA_SCSR(sc, _csr);						\
} while (/* CONSTCOND */0)

void
dma_reset(struct dma_softc *sc)
{
	uint32_t csr;

	if (sc->sc_dmamap->dm_nsegs > 0)
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);

	DMA_FLUSH(sc, 1);
	csr = DMA_GCSR(sc);

	csr |= D_RESET;			/* reset DMA */
	DMA_SCSR(sc, csr);
	DELAY(200);			/* what should this be ? */

	/*DMAWAIT1(sc); why was this here? */
	csr = DMA_GCSR(sc);
	csr &= ~D_RESET;		/* de-assert reset line */
	DMA_SCSR(sc, csr);
	DELAY(5);			/* allow a few ticks to settle */

	/*
	 * Get transfer burst size from (?) and plug it into the
	 * controller registers. This is needed on the Sun4m...
	 * Do we need it too?  Apparently not, because the 3/80
	 * always has the old, REV zero DMA chip.
	 */
	csr = DMA_GCSR(sc);
	csr |= D_INT_EN;		/* enable interrupts */

	DMA_SCSR(sc, csr);

	sc->sc_active = 0;
}


#define DMAMAX(a)	(MAX_DMA_SZ - ((a) & (MAX_DMA_SZ-1)))
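
/*
 * Worked example (illustrative values only): with MAX_DMA_SZ at
 * 0x01000000, DMAMAX(0x00fff000) evaluates to 0x1000, i.e. only 4KB
 * may be transferred before the address would cross the next 16MB
 * boundary.
 */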

/*
 * Set up a DMA transfer.
 */
int
dma_setup(struct dma_softc *sc, uint8_t **addr, size_t *len, int datain,
    size_t *dmasize)
{
	uint32_t csr;

	DMA_FLUSH(sc, 0);

#if 0
	DMA_SCSR(sc, DMA_GCSR(sc) & ~D_INT_EN);
#endif
	sc->sc_dmaaddr = addr;
	sc->sc_dmalen = len;

	NCR_DMA(("%s: start %d@%p,%d\n", device_xname(sc->sc_dev),
	    *sc->sc_dmalen, *sc->sc_dmaaddr, datain ? 1 : 0));

	/*
	 * The rules say we cannot transfer more than the limit
	 * of this DMA chip (64KB for the old chip, 16MB for the new),
	 * and we cannot cross a 16MB boundary.
	 */
	*dmasize = sc->sc_dmasize =
	    uimin(*dmasize, DMAMAX((size_t) *sc->sc_dmaaddr));

	NCR_DMA(("%s: dmasize = %d\n", __func__, sc->sc_dmasize));

	/* Program the DMA address */
	if (sc->sc_dmasize) {
		if (bus_dmamap_load(sc->sc_dmatag, sc->sc_dmamap,
		    *sc->sc_dmaaddr, sc->sc_dmasize,
		    NULL /* kernel address */, BUS_DMA_NOWAIT))
			panic("%s: cannot allocate DVMA address",
			    device_xname(sc->sc_dev));
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0, sc->sc_dmasize,
		    datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, DMA_REG_ADDR,
		    sc->sc_dmamap->dm_segs[0].ds_addr);
	}

	/* We never have DMAREV_ESC. */

	/* Setup DMA control register */
	csr = DMA_GCSR(sc);
	if (datain)
		csr |= D_WRITE;
	else
		csr &= ~D_WRITE;
	csr |= D_INT_EN;
	DMA_SCSR(sc, csr);

	return 0;
}
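
/*
 * Sketch of the expected call pattern (hypothetical glue; the real
 * wiring is done by the esp front end in esp.c and may differ):
 *
 *	dma_setup(esc->sc_dma, addr, len, datain, dmasize);
 *
 * typically with a separate "go" hook later setting D_EN_DMA to
 * actually start the transfer.
 */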

/*
 * Pseudo (chained) interrupt from the esp driver to kick the
 * currently running DMA transfer. For now we rely on espintr() to
 * pick up and clean up errors.
 *
 * Return 1 if it was a DMA continue.
 */
int
espdmaintr(struct dma_softc *sc)
{
	struct ncr53c9x_softc *nsc = sc->sc_client;
	char bits[64];
	int trans, resid;
	uint32_t csr;

	csr = DMA_GCSR(sc);

#ifdef NCR53C9X_DEBUG
	if (ncr53c9x_debug & NCR_SHOWDMA)
		snprintb(bits, sizeof(bits), DMACSRBITS, csr);
#endif
	NCR_DMA(("%s: intr: addr 0x%x, csr %s\n",
	    device_xname(sc->sc_dev), DMADDR(sc), bits));

	if (csr & D_ERR_PEND) {
		snprintb(bits, sizeof(bits), DMACSRBITS, csr);
		printf("%s: error: csr=%s\n", device_xname(sc->sc_dev), bits);
		csr &= ~D_EN_DMA;	/* Stop DMA */
		DMA_SCSR(sc, csr);
		csr |= D_FLUSH;
		DMA_SCSR(sc, csr);
		return -1;
	}

	/* This is an "assertion" :) */
	if (sc->sc_active == 0)
		panic("%s: DMA wasn't active", __func__);

	DMA_DRAIN(sc, 0);

	/* DMA has stopped */
	csr &= ~D_EN_DMA;
	DMA_SCSR(sc, csr);
	sc->sc_active = 0;

	if (sc->sc_dmasize == 0) {
		/* A "Transfer Pad" operation completed */
		NCR_DMA(("%s: discarded %d bytes (tcl=%d, tcm=%d)\n",
		    __func__,
		    NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8),
		    NCR_READ_REG(nsc, NCR_TCL),
		    NCR_READ_REG(nsc, NCR_TCM)));
		return 0;
	}

	resid = 0;
	/*
	 * If a transfer onto the SCSI bus gets interrupted by the device
	 * (e.g. for a SAVE DATA POINTER message), the data in the FIFO
	 * counts as residual since the ESP counter registers get
	 * decremented as bytes are clocked into the FIFO.
	 */
	if (!(csr & D_WRITE) &&
	    (resid = (NCR_READ_REG(nsc, NCR_FFLAG) & NCRFIFO_FF)) != 0) {
		NCR_DMA(("%s: empty esp FIFO of %d ", __func__, resid));
	}

	if ((nsc->sc_espstat & NCRSTAT_TC) == 0) {
		/*
		 * `Terminal count' is off, so read the residue
		 * out of the ESP counter registers.
		 */
		resid += (NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8) |
		    ((nsc->sc_cfg2 & NCRCFG2_FE) ?
		    (NCR_READ_REG(nsc, NCR_TCH) << 16) : 0));

		if (resid == 0 && sc->sc_dmasize == 65536 &&
		    (nsc->sc_cfg2 & NCRCFG2_FE) == 0)
			/* A transfer of 64K is encoded as `TCL=TCM=0' */
			resid = 65536;
	}
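
	/*
	 * Worked example (illustrative register values): with TCL = 0x34
	 * and TCM = 0x12 on a chip without NCRCFG2_FE, the counter term
	 * added above evaluates to 0x34 | (0x12 << 8) = 0x1234, i.e.
	 * 4660 bytes of the programmed transfer were not completed.
	 */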

	trans = sc->sc_dmasize - resid;
	if (trans < 0) {			/* transferred < 0 ? */
#if 0
		/*
		 * This situation can happen in perfectly normal operation
		 * if the ESP is reselected while using DMA to select
		 * another target.  As such, don't print the warning.
		 */
		printf("%s: xfer (%d) > req (%d)\n",
		    device_xname(sc->sc_dev), trans, sc->sc_dmasize);
#endif
		trans = sc->sc_dmasize;
	}

	NCR_DMA(("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n",
	    __func__,
	    NCR_READ_REG(nsc, NCR_TCL),
	    NCR_READ_REG(nsc, NCR_TCM),
	    (nsc->sc_cfg2 & NCRCFG2_FE) ?
	    NCR_READ_REG(nsc, NCR_TCH) : 0,
	    trans, resid));

#ifdef	SUN3X_470_EVENTUALLY
	if (csr & D_WRITE)
		cache_flush(*sc->sc_dmaaddr, trans);
#endif

	if (sc->sc_dmamap->dm_nsegs > 0) {
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0, sc->sc_dmasize,
		    (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);
	}

	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr += trans;

#if 0	/* this is not normal operation just yet */
	if (*sc->sc_dmalen == 0 ||
	    nsc->sc_phase != nsc->sc_prevphase)
		return 0;

	/* and again */
	dma_start(sc, sc->sc_dmaaddr, sc->sc_dmalen, DMA_GCSR(sc) & D_WRITE);
	return 1;
#endif
	return 0;
}