xref: /netbsd-src/sys/arch/sparc/dev/sw.c (revision 946379e7b37692fc43f68eb0d1c10daa0a7f3b6c)
1 /*	$NetBSD: sw.c,v 1.23 2011/07/01 18:50:41 dyoung Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * This file contains only the machine-dependent parts of the
34  * Sun4 SCSI driver.  (Autoconfig stuff and DMA functions.)
35  * The machine-independent parts are in ncr5380sbc.c
36  *
37  * Supported hardware includes:
38  * Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
39  * Sun SCSI-3 on VME (si: Sun 4/200-series, others)
40  *
41  * The VME variant has a bit to enable or disable the DMA engine,
42  * but that bit also gates the interrupt line from the NCR5380!
43  * Therefore, in order to get any interrupt from the 5380 (i.e.
44  * for reselect), one must clear the DMA engine transfer count and
45  * then enable DMA.  This has the further complication that you
46  * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
47  * we have to turn DMA back off before we even look at the 5380.
48  *
49  * What wonderfully whacky hardware this is!
50  *
51  * David Jones wrote the initial version of this module for NetBSD/sun3,
52  * which included support for the VME adapter only. (no reselection).
53  *
54  * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
55  * both the VME and OBIO code to support disconnect/reselect.
56  * (Required figuring out the hardware "features" noted above.)
57  *
58  * The autoconfiguration boilerplate came from Adam Glass.
59  *
60  * Jason R. Thorpe ported the autoconfiguration and VME portions to
61  * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
62  * a wacky OBIO variant of the VME SCSI-3.  Many thanks to Chuck Cranor
63  * for lots of helpful tips and suggestions.  Thanks also to Paul Kranenburg
64  * and Chris Torek for bits of insight needed along the way.  Thanks to
65  * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
66  * for the sake of testing.  Andrew Gillham helped work out the bugs
67  * in the 4/100 DMA code.
68  */
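
/*
 * Note: the OBIO "sw" handled by this file appears to share that
 * interrupt-gating behavior; see sw_intr_on() and sw_intr_off()
 * below, which set and clear SW_CSR_DMA_EN around use of the 5380.
 */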
69 
70 /*
71  * NOTE: support for the 4/100 "SCSI Weird" is not complete!  DMA
72  * works, but interrupts (and, thus, reselection) don't.  I don't know
73  * why, and I don't have a machine to test this on further.
74  *
75  * DMA, DMA completion interrupts, and reselection work fine on my
76  * 4/260 with modern SCSI-II disks attached.  I've had reports of
77  * reselection failing on Sun Shoebox-type configurations where
78  * there are multiple non-SCSI devices behind Emulex or Adaptec
79  * bridges.  These devices pre-date the SCSI-I spec, and might not
80  * behave the way the 5380 code expects.  For this reason, only
81  * DMA is enabled by default in this driver.
82  *
83  *	Jason R. Thorpe <thorpej@NetBSD.org>
84  *	December 8, 1995
85  */
86 
87 #include <sys/cdefs.h>
88 __KERNEL_RCSID(0, "$NetBSD: sw.c,v 1.23 2011/07/01 18:50:41 dyoung Exp $");
89 
90 #include "opt_ddb.h"
91 
92 #include <sys/types.h>
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/kernel.h>
96 #include <sys/malloc.h>
97 #include <sys/errno.h>
98 #include <sys/device.h>
99 #include <sys/buf.h>
100 
101 #include <sys/bus.h>
102 #include <machine/intr.h>
103 #include <machine/autoconf.h>
104 
105 #include <dev/scsipi/scsi_all.h>
106 #include <dev/scsipi/scsipi_all.h>
107 #include <dev/scsipi/scsipi_debug.h>
108 #include <dev/scsipi/scsiconf.h>
109 
110 #ifndef DDB
111 #define	Debugger()
112 #endif
113 
114 #ifndef DEBUG
115 #define DEBUG XXX
116 #endif
117 
118 #define COUNT_SW_LEFTOVERS	XXX	/* See sw DMA completion code */
119 
120 #include <dev/ic/ncr5380reg.h>
121 #include <dev/ic/ncr5380var.h>
122 
123 #include <sparc/dev/swreg.h>
124 
125 /*
126  * Transfers smaller than this are done using PIO
127  * (on the assumption that they're not worth the DMA overhead)
128  */
129 #define	MIN_DMA_LEN 128
130 
131 /*
132  * Transfers larger than 65535 bytes need to be split up.
133  * (Some of the FIFO logic has only 16-bit counters.)
134  * Make the size an integer multiple of the page size
135  * to avoid buf/cluster remap problems.  (paranoid?)
136  */
137 #define	MAX_DMA_LEN 0xE000
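/*
 * For reference: 0xE000 is 57344 bytes, i.e. 14 * 4096 or 7 * 8192,
 * a whole number of pages for either page size, and comfortably
 * below the 16-bit limit of 65535.
 */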
138 
139 #ifdef	DEBUG
140 int sw_debug = 0;
141 #endif
142 
143 /*
144  * This structure is used to keep track of mapped DMA requests.
145  */
146 struct sw_dma_handle {
147 	int 		dh_flags;
148 #define	SIDH_BUSY	0x01		/* This DH is in use */
149 #define	SIDH_OUT	0x02		/* DMA does data out (write) */
150 	u_char		*dh_addr;	/* KVA of start of buffer */
151 	int 		dh_maplen;	/* Original data length */
152 	long		dh_startingpa;	/* PA of buffer; for "sw" */
153 	bus_dmamap_t	dh_dmamap;
154 #define dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
155 };
156 
157 /*
158  * The first structure member has to be the ncr5380_softc
159  * so we can just cast to go back and forth between them.
160  */
161 struct sw_softc {
162 	struct ncr5380_softc	ncr_sc;
163 	bus_space_tag_t		sc_bustag;	/* bus tags */
164 	bus_dma_tag_t		sc_dmatag;
165 
166 	struct sw_dma_handle *sc_dma;
167 	int		sc_xlen;	/* length of current DMA segment. */
168 	int		sc_options;	/* options for this instance. */
169 };
170 
171 /*
172  * Options.  By default, DMA is enabled and DMA completion interrupts
173  * and reselect are disabled.  You may enable additional features
174  * with the `flags' directive in your kernel's configuration file.
175  *
176  * Alternatively, you can patch your kernel with DDB or some other
177  * mechanism.  Each instance's sc_options member starts out as a
178  * copy of the global sw_options (non-zero config flags override it).
179  *
180  * On the "sw", interrupts (and thus reselection) don't work, so
181  * they're disabled by default.  DMA is still a little dangerous, too.
182  *
183  * Note, there's a separate sw_options variable to make life easier.
184  */
185 #define	SW_ENABLE_DMA	0x01	/* Use DMA (maybe polled) */
186 #define	SW_DMA_INTR	0x02	/* DMA completion interrupts */
187 #define	SW_DO_RESELECT	0x04	/* Allow disconnect/reselect */
188 #define	SW_OPTIONS_MASK	(SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
189 #define SW_OPTIONS_BITS	"\10\3RESELECT\2DMA_INTR\1DMA"
190 int sw_options = SW_ENABLE_DMA;
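
/*
 * As a purely illustrative (hypothetical) example, a config entry like
 *
 *	sw0 at obio0 ... flags 0x07
 *
 * would select SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT for that
 * instance, since the attach code below copies any non-zero cf_flags
 * (masked with SW_OPTIONS_MASK) into sc_options.
 */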
191 
192 static int	sw_match(device_t, cfdata_t, void *);
193 static void	sw_attach(device_t, device_t, void *);
194 static int	sw_intr(void *);
195 static void	sw_reset_adapter(struct ncr5380_softc *);
196 static void	sw_minphys(struct buf *);
197 
198 void	sw_dma_alloc(struct ncr5380_softc *);
199 void	sw_dma_free(struct ncr5380_softc *);
200 void	sw_dma_poll(struct ncr5380_softc *);
201 
202 void	sw_dma_setup(struct ncr5380_softc *);
203 void	sw_dma_start(struct ncr5380_softc *);
204 void	sw_dma_eop(struct ncr5380_softc *);
205 void	sw_dma_stop(struct ncr5380_softc *);
206 
207 void	sw_intr_on(struct ncr5380_softc *);
208 void	sw_intr_off(struct ncr5380_softc *);
209 
210 /* Shorthand bus space access */
211 #define SWREG_READ(sc, index) \
212 	bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
213 #define SWREG_WRITE(sc, index, v) \
214 	bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)
215 
216 
217 /* The Sun "SCSI Weird" 4/100 obio controller. */
218 CFATTACH_DECL_NEW(sw, sizeof(struct sw_softc),
219     sw_match, sw_attach, NULL, NULL);
220 
221 static int
222 sw_match(device_t parent, cfdata_t cf, void *aux)
223 {
224 	union obio_attach_args *uoba = aux;
225 	struct obio4_attach_args *oba;
226 
227 	/* Nothing but a Sun 4/100 is going to have these devices. */
228 	if (cpuinfo.cpu_type != CPUTYP_4_100)
229 		return (0);
230 
231 	if (uoba->uoba_isobio4 == 0)
232 		return (0);
233 
234 	/* Make sure there is something there... */
235 	oba = &uoba->uoba_oba4;
236 	return (bus_space_probe(oba->oba_bustag, oba->oba_paddr,
237 				1,	/* probe size */
238 				1,	/* offset */
239 				0,	/* flags */
240 				NULL, NULL));
241 }
242 
243 static void
244 sw_attach(device_t parent, device_t self, void *aux)
245 {
246 	struct sw_softc *sc = device_private(self);
247 	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
248 	union obio_attach_args *uoba = aux;
249 	struct obio4_attach_args *oba = &uoba->uoba_oba4;
250 	bus_space_handle_t bh;
251 	char bits[64];
252 	int i;
253 
254 	ncr_sc->sc_dev = self;
255 	sc->sc_dmatag = oba->oba_dmatag;
256 
257 	/* Map the controller registers. */
258 	if (bus_space_map(oba->oba_bustag, oba->oba_paddr,
259 			  SWREG_BANK_SZ,
260 			  BUS_SPACE_MAP_LINEAR,
261 			  &bh) != 0) {
262 		aprint_error(": cannot map registers\n");
263 		return;
264 	}
265 
266 	ncr_sc->sc_regt = oba->oba_bustag;
267 	ncr_sc->sc_regh = bh;
268 
269 	sc->sc_options = sw_options;
270 
271 	ncr_sc->sc_dma_setup = sw_dma_setup;
272 	ncr_sc->sc_dma_start = sw_dma_start;
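	/* Note: sc_dma_eop and sc_dma_stop both point at sw_dma_stop;
	   the no-op sw_dma_eop() below is not installed. */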
273 	ncr_sc->sc_dma_eop   = sw_dma_stop;
274 	ncr_sc->sc_dma_stop  = sw_dma_stop;
275 	ncr_sc->sc_intr_on   = sw_intr_on;
276 	ncr_sc->sc_intr_off  = sw_intr_off;
277 
278 	/*
279 	 * Establish interrupt channel.
280 	 * The default interrupt priority is always 3; at least, that's
281 	 * what my board seems to use.  --thorpej
282 	 */
283 	if (oba->oba_pri == -1)
284 		oba->oba_pri = 3;
285 
286 	(void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO,
287 				 sw_intr, sc);
288 
289 	aprint_normal(" pri %d\n", oba->oba_pri);
290 
291 
292 	/*
293 	 * Pull in the options flags.  Allow the user to completely
294 	 * override the default values.
295 	 */
296 	if ((device_cfdata(self)->cf_flags & SW_OPTIONS_MASK) != 0)
297 		sc->sc_options =
298 		    device_cfdata(self)->cf_flags & SW_OPTIONS_MASK;
299 
300 	/*
301 	 * Initialize fields used by the MI code
302 	 */
303 
304 	/* NCR5380 register bank offsets */
305 	ncr_sc->sci_r0 = 0;
306 	ncr_sc->sci_r1 = 1;
307 	ncr_sc->sci_r2 = 2;
308 	ncr_sc->sci_r3 = 3;
309 	ncr_sc->sci_r4 = 4;
310 	ncr_sc->sci_r5 = 5;
311 	ncr_sc->sci_r6 = 6;
312 	ncr_sc->sci_r7 = 7;
313 
314 	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;
315 
316 	/*
317 	 * MD function pointers used by the MI code.
318 	 */
319 	ncr_sc->sc_pio_out = ncr5380_pio_out;
320 	ncr_sc->sc_pio_in =  ncr5380_pio_in;
321 	ncr_sc->sc_dma_alloc = sw_dma_alloc;
322 	ncr_sc->sc_dma_free  = sw_dma_free;
323 	ncr_sc->sc_dma_poll  = sw_dma_poll;
324 
325 	ncr_sc->sc_flags = 0;
326 	if ((sc->sc_options & SW_DO_RESELECT) == 0)
327 		ncr_sc->sc_no_disconnect = 0xFF;
328 	if ((sc->sc_options & SW_DMA_INTR) == 0)
329 		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
330 	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;
331 
332 
333 	/*
334 	 * Allocate DMA handles.
335 	 */
336 	i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
337 	sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
338 	if (sc->sc_dma == NULL)
339 		panic("sw: DMA handle malloc failed");
340 
341 	for (i = 0; i < SCI_OPENINGS; i++) {
342 		sc->sc_dma[i].dh_flags = 0;
343 
344 		/* Allocate a DMA handle */
345 		if (bus_dmamap_create(
346 				sc->sc_dmatag,	/* tag */
347 				MAXPHYS,	/* size */
348 				1,		/* nsegments */
349 				MAXPHYS,	/* maxsegsz */
350 				0,		/* boundary */
351 				BUS_DMA_NOWAIT,
352 				&sc->sc_dma[i].dh_dmamap) != 0) {
353 
354 			aprint_error_dev(self, "DMA buffer map create error\n");
355 			return;
356 		}
357 	}
358 
359 	if (sc->sc_options) {
360 		snprintb(bits, sizeof(bits),
361 		    SW_OPTIONS_BITS, sc->sc_options);
362 		aprint_normal_dev(self, "options=%s\n", bits);
363 	}
364 
365 	ncr_sc->sc_channel.chan_id = 7;
366 	ncr_sc->sc_adapter.adapt_minphys = sw_minphys;
367 
368 	/* Initialize sw board */
369 	sw_reset_adapter(ncr_sc);
370 
371 	/* Attach the ncr5380 chip driver */
372 	ncr5380_attach(ncr_sc);
373 }
374 
375 static void
376 sw_minphys(struct buf *bp)
377 {
378 
379 	if (bp->b_bcount > MAX_DMA_LEN) {
380 #ifdef DEBUG
381 		if (sw_debug) {
382 			printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
383 			Debugger();
384 		}
385 #endif
386 		bp->b_bcount = MAX_DMA_LEN;
387 	}
388 	minphys(bp);
389 }
390 
391 #define CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
392 	SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR )
393 
394 static int
395 sw_intr(void *arg)
396 {
397 	struct sw_softc *sc = arg;
398 	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
399 	int dma_error, claimed;
400 	u_short csr;
401 
402 	claimed = 0;
403 	dma_error = 0;
404 
405 	/* SBC interrupt? DMA interrupt? */
406 	csr = SWREG_READ(ncr_sc, SWREG_CSR);
407 
408 	NCR_TRACE("sw_intr: csr=0x%x\n", csr);
409 
410 	if (csr & SW_CSR_DMA_CONFLICT) {
411 		dma_error |= SW_CSR_DMA_CONFLICT;
412 		printf("%s: DMA conflict\n", __func__);
413 	}
414 	if (csr & SW_CSR_DMA_BUS_ERR) {
415 		dma_error |= SW_CSR_DMA_BUS_ERR;
416 		printf("%s: DMA bus error\n", __func__);
417 	}
418 	if (dma_error) {
419 		if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
420 			sc->ncr_sc.sc_state |= NCR_ABORTING;
421 		/* Make sure we will call the main isr. */
422 		csr |= SW_CSR_DMA_IP;
423 	}
424 
425 	if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
426 		claimed = ncr5380_intr(&sc->ncr_sc);
427 #ifdef DEBUG
428 		if (!claimed) {
429 			printf("%s: spurious from SBC\n", __func__);
430 			if (sw_debug & 4) {
431 				Debugger();	/* XXX */
432 			}
433 		}
434 #endif
435 	}
436 
437 	return claimed;
438 }
439 
440 
441 static void
442 sw_reset_adapter(struct ncr5380_softc *ncr_sc)
443 {
444 
445 #ifdef	DEBUG
446 	if (sw_debug) {
447 		printf("%s\n", __func__);
448 	}
449 #endif
450 
451 	/*
452 	 * The reset bits in the CSR are active low.
453 	 */
454 	SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
455 	delay(10);
456 	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);
457 
458 	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
459 	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
460 	delay(10);
461 	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);
462 
463 	SCI_CLR_INTR(ncr_sc);
464 }
465 
466 
467 /*****************************************************************
468  * Common functions for DMA
469  ****************************************************************/
470 
471 /*
472  * Allocate a DMA handle from sc->sc_dma and prepare for DMA
473  * transfer.  On the Sun4, this means mapping the buffer
474  * into DVMA space.
475  */
476 void
477 sw_dma_alloc(struct ncr5380_softc *ncr_sc)
478 {
479 	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
480 	struct sci_req *sr = ncr_sc->sc_current;
481 	struct scsipi_xfer *xs = sr->sr_xs;
482 	struct sw_dma_handle *dh;
483 	int i, xlen;
484 	u_long addr;
485 
486 #ifdef DIAGNOSTIC
487 	if (sr->sr_dma_hand != NULL)
488 		panic("%s: already have DMA handle", __func__);
489 #endif
490 
491 #if 1	/* XXX - Temporary */
492 	/* XXX - In case we think DMA is completely broken... */
493 	if ((sc->sc_options & SW_ENABLE_DMA) == 0)
494 		return;
495 #endif
496 
497 	addr = (u_long)ncr_sc->sc_dataptr;
498 	xlen = ncr_sc->sc_datalen;
499 
500 	/* If the DMA start addr is misaligned then do PIO */
501 	if ((addr & 1) || (xlen & 1)) {
502 		printf("%s: misaligned.\n", __func__);
503 		return;
504 	}
505 
506 	/* Make sure our caller checked sc_min_dma_len. */
507 	if (xlen < MIN_DMA_LEN)
508 		panic("%s: xlen=0x%x", __func__, xlen);
509 
510 	/* Find a free DMA handle.  Guaranteed to find one since we have
511 	   as many DMA handles (SCI_OPENINGS) as the adapter has openings. */
512 	for (i = 0; i < SCI_OPENINGS; i++) {
513 		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
514 			goto found;
515 	}
516 	panic("sw: no free DMA handles.");
517 
518 found:
519 	dh = &sc->sc_dma[i];
520 	dh->dh_flags = SIDH_BUSY;
521 	dh->dh_addr = (u_char *)addr;
522 	dh->dh_maplen  = xlen;
523 
524 	/* Copy the "write" flag for convenience. */
525 	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
526 		dh->dh_flags |= SIDH_OUT;
527 
528 	/*
529 	 * Double-map the buffer into DVMA space.  If we can't re-map
530 	 * the buffer, we print a warning and fall back to PIO mode.
531 	 *
532 	 * NOTE: it is not safe to sleep here!
533 	 */
534 	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
535 			    (void *)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
536 		/* Can't remap segment */
537 		printf("%s: can't remap 0x%lx/0x%x, doing PIO\n",
538 		    __func__, addr, dh->dh_maplen);
539 		dh->dh_flags = 0;
540 		return;
541 	}
542 	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
543 			(dh->dh_flags & SIDH_OUT)
544 				? BUS_DMASYNC_PREWRITE
545 				: BUS_DMASYNC_PREREAD);
546 
547 	/* success */
548 	sr->sr_dma_hand = dh;
549 }
550 
551 
552 void
553 sw_dma_free(struct ncr5380_softc *ncr_sc)
554 {
555 	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
556 	struct sci_req *sr = ncr_sc->sc_current;
557 	struct sw_dma_handle *dh = sr->sr_dma_hand;
558 
559 #ifdef DIAGNOSTIC
560 	if (dh == NULL)
561 		panic("%s: no DMA handle", __func__);
562 #endif
563 
564 	if (ncr_sc->sc_state & NCR_DOINGDMA)
565 		panic("%s: free while in progress", __func__);
566 
567 	if (dh->dh_flags & SIDH_BUSY) {
568 		/* Give back the DVMA space. */
569 		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
570 				dh->dh_dvma, dh->dh_maplen,
571 				(dh->dh_flags & SIDH_OUT)
572 					? BUS_DMASYNC_POSTWRITE
573 					: BUS_DMASYNC_POSTREAD);
574 		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
575 		dh->dh_flags = 0;
576 	}
577 	sr->sr_dma_hand = NULL;
578 }
579 
580 
581 /*
582  * Poll (spin-wait) for DMA completion.
583  * Called right after xx_dma_start(), and
584  * xx_dma_stop() will be called next.
585  * Same for either VME or OBIO.
586  */
587 void
588 sw_dma_poll(struct ncr5380_softc *ncr_sc)
589 {
590 	struct sci_req *sr = ncr_sc->sc_current;
591 	int tmo, csr_mask, csr;
592 
593 	/* Make sure DMA started successfully. */
594 	if (ncr_sc->sc_state & NCR_ABORTING)
595 		return;
596 
597 	csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
598 	    SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;
599 
600 	tmo = 50000;	/* X100 = 5 sec. */
601 	for (;;) {
602 		csr = SWREG_READ(ncr_sc, SWREG_CSR);
603 		if (csr & csr_mask)
604 			break;
605 		if (--tmo <= 0) {
606 			printf("%s: DMA timeout (while polling)\n",
607 			    device_xname(ncr_sc->sc_dev));
608 			/* Indicate timeout as MI code would. */
609 			sr->sr_flags |= SR_OVERDUE;
610 			break;
611 		}
612 		delay(100);
613 	}
614 
615 #ifdef	DEBUG
616 	if (sw_debug) {
617 		printf("%s: done, csr=0x%x\n", __func__, csr);
618 	}
619 #endif
620 }
621 
622 
623 /*
624  * This is called when the bus is going idle,
625  * so we want to enable the SBC interrupts.
626  * That is controlled by the DMA enable!
627  * Who would have guessed!
628  * What a NASTY trick!
629  *
630  * XXX THIS MIGHT NOT WORK RIGHT!
631  */
632 void
633 sw_intr_on(struct ncr5380_softc *ncr_sc)
634 {
635 	uint32_t csr;
636 
637 	sw_dma_setup(ncr_sc);
638 	csr = SWREG_READ(ncr_sc, SWREG_CSR);
639 	csr |= SW_CSR_DMA_EN;	/* XXX - this bit is for vme only?! */
640 	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
641 }
642 
643 /*
644  * This is called when the bus is idle and we are
645  * about to start playing with the SBC chip.
646  *
647  * XXX THIS MIGHT NOT WORK RIGHT!
648  */
649 void
650 sw_intr_off(struct ncr5380_softc *ncr_sc)
651 {
652 	uint32_t csr;
653 
654 	csr = SWREG_READ(ncr_sc, SWREG_CSR);
655 	csr &= ~SW_CSR_DMA_EN;
656 	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
657 }
658 
659 
660 /*
661  * This function is called during the COMMAND or MSG_IN phase
662  * to set up the DMA engine before the bus enters a DATA phase.
663  * to setup the DMA engine before the bus enters a DATA phase.
664  *
665  * On the OBIO version we just clear the DMA count and address
666  * here (to make sure it stays idle) and do the real setup
667  * later, in dma_start.
668  */
669 void
670 sw_dma_setup(struct ncr5380_softc *ncr_sc)
671 {
672 	uint32_t csr;
673 
674 	/* No FIFO to reset on "sw". */
675 
676 	/* Set direction (assume recv here) */
677 	csr = SWREG_READ(ncr_sc, SWREG_CSR);
678 	csr &= ~SW_CSR_SEND;
679 	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
680 
681 	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
682 	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
683 }
684 
685 
686 void
687 sw_dma_start(struct ncr5380_softc *ncr_sc)
688 {
689 	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
690 	struct sci_req *sr = ncr_sc->sc_current;
691 	struct sw_dma_handle *dh = sr->sr_dma_hand;
692 	u_long dva;
693 	int xlen, adj, adjlen;
694 	u_int mode;
695 	uint32_t csr;
696 
697 	/*
698 	 * Get the DVMA mapping for this segment.
699 	 */
700 	dva = (u_long)(dh->dh_dvma);
701 	if (dva & 1)
702 		panic("%s: bad dva=0x%lx", __func__, dva);
703 
704 	xlen = ncr_sc->sc_datalen;
705 	xlen &= ~1;
706 	sc->sc_xlen = xlen;	/* XXX: or less... */
707 
708 #ifdef	DEBUG
709 	if (sw_debug & 2) {
710 		printf("%s: dh=%p, dva=0x%lx, xlen=%d\n",
711 		    __func__, dh, dva, xlen);
712 	}
713 #endif
714 
715 	/*
716 	 * Set up the DMA controller.
717 	 * Note that xlen may be less than sc_datalen.
718 	 */
719 
720 	/* Set direction (send/recv) */
721 	csr = SWREG_READ(ncr_sc, SWREG_CSR);
722 	if (dh->dh_flags & SIDH_OUT) {
723 		csr |= SW_CSR_SEND;
724 	} else {
725 		csr &= ~SW_CSR_SEND;
726 	}
727 	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
728 
729 	/*
730 	 * The "sw" needs longword aligned transfers.  We
731 	 * detect a shortword aligned transfer here, and adjust the
732 	 * DMA transfer by 2 bytes.  These two bytes are read/written
733 	 * in PIO mode just before the DMA is started.
734 	 */
735 	adj = 0;
736 	if (dva & 2) {
737 		adj = 2;
738 #ifdef DEBUG
739 		if (sw_debug & 2)
740 			printf("%s: adjusted up %d bytes\n", __func__, adj);
741 #endif
742 	}
743 
744 	/* We have to frob the address on the "sw". */
745 	dh->dh_startingpa = (dva | 0xF00000);
746 	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
747 	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);
748 
749 	/*
750 	 * Acknowledge the phase change.  (After DMA setup!)
751 	 * Put the SBIC into DMA mode, and start the transfer.
752 	 */
753 	if (dh->dh_flags & SIDH_OUT) {
754 		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
755 		if (adj) {
756 			adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
757 			    adj, dh->dh_addr);
758 			if (adjlen != adj)
759 				printf("%s: bad outgoing adj, %d != %d\n",
760 				    device_xname(ncr_sc->sc_dev), adjlen, adj);
761 		}
762 		SCI_CLR_INTR(ncr_sc);
763 		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
764 		mode = NCR5380_READ(ncr_sc, sci_mode);
765 		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
766 		NCR5380_WRITE(ncr_sc, sci_mode, mode);
767 		NCR5380_WRITE(ncr_sc, sci_dma_send, 0); 	/* start it */
768 	} else {
769 		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
770 		if (adj) {
771 			adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
772 			    adj, dh->dh_addr);
773 			if (adjlen != adj)
774 				printf("%s: bad incoming adj, %d != %d\n",
775 				    device_xname(ncr_sc->sc_dev), adjlen, adj);
776 		}
777 		SCI_CLR_INTR(ncr_sc);
778 		NCR5380_WRITE(ncr_sc, sci_icmd, 0);
779 		mode = NCR5380_READ(ncr_sc, sci_mode);
780 		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
781 		NCR5380_WRITE(ncr_sc, sci_mode, mode);
782 		NCR5380_WRITE(ncr_sc, sci_irecv, 0); 	/* start it */
783 	}
784 
785 	/* Let'er rip! */
786 	csr |= SW_CSR_DMA_EN;
787 	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
788 
789 	ncr_sc->sc_state |= NCR_DOINGDMA;
790 
791 #ifdef	DEBUG
792 	if (sw_debug & 2) {
793 		printf("%s: started, flags=0x%x\n",
794 		    __func__, ncr_sc->sc_state);
795 	}
796 #endif
797 }
798 
799 
800 void
801 sw_dma_eop(struct ncr5380_softc *ncr_sc)
802 {
803 
804 	/* Not needed - DMA was stopped prior to examining sci_csr */
805 }
806 
807 #if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
808 #define COUNT_SW_LEFTOVERS
809 #endif
810 #ifdef COUNT_SW_LEFTOVERS
811 /*
812  * Let's find out how often these occur.  Read these with DDB from time
813  * to time.
814  */
815 int	sw_3_leftover = 0;
816 int	sw_2_leftover = 0;
817 int	sw_1_leftover = 0;
818 int	sw_0_leftover = 0;
819 #endif
820 
821 void
822 sw_dma_stop(struct ncr5380_softc *ncr_sc)
823 {
824 	struct sci_req *sr = ncr_sc->sc_current;
825 	struct sw_dma_handle *dh = sr->sr_dma_hand;
826 	int ntrans = 0, dva;
827 	u_int mode;
828 	uint32_t csr;
829 
830 	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
831 #ifdef	DEBUG
832 		printf("%s: DMA not running\n", __func__);
833 #endif
834 		return;
835 	}
836 	ncr_sc->sc_state &= ~NCR_DOINGDMA;
837 
838 	/* First, halt the DMA engine. */
839 	csr = SWREG_READ(ncr_sc, SWREG_CSR);
840 	csr &= ~SW_CSR_DMA_EN;
841 	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
842 
843 	/*
844 	 * XXX HARDWARE BUG!
845 	 * Apparently, some early 4/100 SCSI controllers had a hardware
846 	 * bug that caused the controller to do illegal memory access.
847 	 * We see this as SW_CSR_DMA_BUS_ERR (makes sense).  To work around
848 	 * this, we simply need to clean up after ourselves ... there will
849 	 * be as many as 3 bytes left over.  Since we clean up "left-over"
850 	 * bytes on every read anyway, we just continue to chug along
851 	 * if SW_CSR_DMA_BUS_ERR is asserted.  (This was probably worked
852 	 * around in hardware later with the "left-over byte" indicator
853 	 * in the VME controller.)
854 	 */
855 #if 0
856 	if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR))
857 #else
858 	if (csr & (SW_CSR_DMA_CONFLICT))
859 #endif
860 	{
861 		printf("sw: DMA error, csr=0x%x, reset\n", csr);
862 		sr->sr_xs->error = XS_DRIVER_STUFFUP;
863 		ncr_sc->sc_state |= NCR_ABORTING;
864 		sw_reset_adapter(ncr_sc);
865 	}
866 
867 	/* Note that timeout may have set the error flag. */
868 	if (ncr_sc->sc_state & NCR_ABORTING)
869 		goto out;
870 
871 	/*
872 	 * Now try to figure out how much was actually transferred
873 	 *
874 	 * The "sw" doesn't have a FIFO or a bcr, so we've stored
875 	 * the starting PA of the transfer in the DMA handle,
876 	 * and subtract it from the ending PA left in the dma_addr
877 	 * register.
878 	 */
879 	dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
880 	ntrans = (dva - dh->dh_startingpa);
881 
882 #ifdef	DEBUG
883 	if (sw_debug & 2) {
884 		printf("%s: ntrans=0x%x\n", __func__, ntrans);
885 	}
886 #endif
887 
888 	if (ntrans > ncr_sc->sc_datalen)
889 		panic("%s: excess transfer", __func__);
890 
891 	/* Adjust data pointer */
892 	ncr_sc->sc_dataptr += ntrans;
893 	ncr_sc->sc_datalen -= ntrans;
894 
895 	/*
896 	 * After a read, we may need to clean up
897 	 * "left-over bytes" (yuck!).  The "sw" doesn't
898 	 * have a "left-over" indicator, so we have to do
899 	 * this no matter what.  Ick.
900 	 */
901 	if ((dh->dh_flags & SIDH_OUT) == 0) {
902 		char *cp = ncr_sc->sc_dataptr;
903 		uint32_t bpr;
904 
905 		bpr = SWREG_READ(ncr_sc, SWREG_BPR);
906 
907 		switch (dva & 3) {
908 		case 3:
909 			cp[0] = (bpr & 0xff000000) >> 24;
910 			cp[1] = (bpr & 0x00ff0000) >> 16;
911 			cp[2] = (bpr & 0x0000ff00) >> 8;
912 #ifdef COUNT_SW_LEFTOVERS
913 			++sw_3_leftover;
914 #endif
915 			break;
916 
917 		case 2:
918 			cp[0] = (bpr & 0xff000000) >> 24;
919 			cp[1] = (bpr & 0x00ff0000) >> 16;
920 #ifdef COUNT_SW_LEFTOVERS
921 			++sw_2_leftover;
922 #endif
923 			break;
924 
925 		case 1:
926 			cp[0] = (bpr & 0xff000000) >> 24;
927 #ifdef COUNT_SW_LEFTOVERS
928 			++sw_1_leftover;
929 #endif
930 			break;
931 
932 #ifdef COUNT_SW_LEFTOVERS
933 		default:
934 			++sw_0_leftover;
935 			break;
936 #endif
937 		}
938 	}
939 
940  out:
941 	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
942 	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
943 
944 	/* Put SBIC back in PIO mode. */
945 	mode = NCR5380_READ(ncr_sc, sci_mode);
946 	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
947 	NCR5380_WRITE(ncr_sc, sci_mode, mode);
948 	NCR5380_WRITE(ncr_sc, sci_icmd, 0);
949 
950 #ifdef DEBUG
951 	if (sw_debug & 2) {
952 		printf("%s: ntrans=0x%x\n", __func__, ntrans);
953 	}
954 #endif
955 }
956