/*	$NetBSD: sw.c,v 1.20 2008/04/04 16:00:57 tsutsui Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains only the machine-dependent parts of the
 * Sun4 SCSI driver.  (Autoconfig stuff and DMA functions.)
 * The machine-independent parts are in ncr5380sbc.c
 *
 * Supported hardware includes:
 * Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
 * Sun SCSI-3 on VME (si: Sun 4/200-series, others)
 *
 * The VME variant has a bit to enable or disable the DMA engine,
 * but that bit also gates the interrupt line from the NCR5380!
 * Therefore, in order to get any interrupt from the 5380, (i.e.
 * for reselect) one must clear the DMA engine transfer count and
 * then enable DMA.  This has the further complication that you
 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
 * we have to turn DMA back off before we even look at the 5380.
 *
 * What wonderfully whacky hardware this is!
 *
 * David Jones wrote the initial version of this module for NetBSD/sun3,
 * which included support for the VME adapter only. (no reselection).
 *
 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
 * both the VME and OBIO code to support disconnect/reselect.
 * (Required figuring out the hardware "features" noted above.)
 *
 * The autoconfiguration boilerplate came from Adam Glass.
 *
 * Jason R. Thorpe ported the autoconfiguration and VME portions to
 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
 * a wacky OBIO variant of the VME SCSI-3.  Many thanks to Chuck Cranor
 * for lots of helpful tips and suggestions.  Thanks also to Paul Kranenburg
 * and Chris Torek for bits of insight needed along the way.  Thanks to
 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
 * for the sake of testing.  Andrew Gillham helped work out the bugs
 * in the 4/100 DMA code.
 */

/*
 * NOTE: support for the 4/100 "SCSI Weird" is not complete!  DMA
 * works, but interrupts (and, thus, reselection) don't.  I don't know
 * why, and I don't have a machine to test this on further.
 *
 * DMA, DMA completion interrupts, and reselection work fine on my
 * 4/260 with modern SCSI-II disks attached.  I've had reports of
 * reselection failing on Sun Shoebox-type configurations where
 * there are multiple non-SCSI devices behind Emulex or Adaptec
 * bridges.  These devices pre-date the SCSI-I spec, and might not
 * behave the way the 5380 code expects.  For this reason, only
 * DMA is enabled by default in this driver.
 *
 *	Jason R. Thorpe <thorpej@NetBSD.org>
 *	December 8, 1995
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sw.c,v 1.20 2008/04/04 16:00:57 tsutsui Exp $");

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/buf.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#ifndef DDB
#define	Debugger()
#endif

#ifndef DEBUG
#define DEBUG XXX
#endif

#define COUNT_SW_LEFTOVERS	XXX	/* See sw DMA completion code */

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include <sparc/dev/swreg.h>

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption that they're not worth the DMA overhead).
 */
#define	MIN_DMA_LEN 128
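/* (This threshold is handed to the MI code via sc_min_dma_len in sw_attach().) */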

/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems.  (paranoid?)
 */
#define	MAX_DMA_LEN 0xE000
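/* (Transfers are clamped to this limit by sw_minphys() below.) */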

#ifdef	DEBUG
int sw_debug = 0;
#endif

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct sw_dma_handle {
	int 		dh_flags;
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	u_char		*dh_addr;	/* KVA of start of buffer */
	int 		dh_maplen;	/* Original data length */
	long		dh_startingpa;	/* PA of buffer; for "sw" */
	bus_dmamap_t	dh_dmamap;
#define dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can simply cast to go back and forth between them.
 */
struct sw_softc {
	struct ncr5380_softc	ncr_sc;
	bus_space_tag_t		sc_bustag;	/* bus tags */
	bus_dma_tag_t		sc_dmatag;

	struct sw_dma_handle *sc_dma;
	int		sc_xlen;	/* length of current DMA segment. */
	int		sc_options;	/* options for this instance. */
};

/*
 * Options.  By default, DMA is enabled and DMA completion interrupts
 * and reselect are disabled.  You may enable additional features
 * with the `flags' directive in your kernel's configuration file.
 *
 * Alternatively, you can patch your kernel with DDB or some other
 * mechanism.  The sc_options member of the softc is OR'd with
 * the value in sw_options.
 *
 * On the "sw", interrupts (and thus reselection) don't work, so they're
 * disabled by default.  DMA is still a little dangerous, too.
 *
 * Note that there's a separate sw_options to make life easier.
 */
#define	SW_ENABLE_DMA	0x01	/* Use DMA (maybe polled) */
#define	SW_DMA_INTR	0x02	/* DMA completion interrupts */
#define	SW_DO_RESELECT	0x04	/* Allow disconnect/reselect */
#define	SW_OPTIONS_MASK	(SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
#define SW_OPTIONS_BITS	"\10\3RESELECT\2DMA_INTR\1DMA"
int sw_options = SW_ENABLE_DMA;
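
/*
 * For example, a config(5) line along these lines (the address and
 * interrupt level shown are placeholders, not taken from any real
 * kernel configuration):
 *
 *	sw0 at obio0 addr 0x0a000000 level 3 flags 0x07
 *
 * would turn on DMA, DMA completion interrupts, and disconnect/reselect.
 */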

static int	sw_match(device_t, cfdata_t, void *);
static void	sw_attach(device_t, device_t, void *);
static int	sw_intr(void *);
static void	sw_reset_adapter(struct ncr5380_softc *);
static void	sw_minphys(struct buf *);

void	sw_dma_alloc(struct ncr5380_softc *);
void	sw_dma_free(struct ncr5380_softc *);
void	sw_dma_poll(struct ncr5380_softc *);

void	sw_dma_setup(struct ncr5380_softc *);
void	sw_dma_start(struct ncr5380_softc *);
void	sw_dma_eop(struct ncr5380_softc *);
void	sw_dma_stop(struct ncr5380_softc *);

void	sw_intr_on(struct ncr5380_softc *);
void	sw_intr_off(struct ncr5380_softc *);

/* Shorthand bus space access */
#define SWREG_READ(sc, index) \
	bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
#define SWREG_WRITE(sc, index, v) \
	bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)


/* The Sun "SCSI Weird" 4/100 obio controller. */
CFATTACH_DECL_NEW(sw, sizeof(struct sw_softc),
    sw_match, sw_attach, NULL, NULL);

static int
sw_match(device_t parent, cfdata_t cf, void *aux)
{
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba;

	/* Nothing but a Sun 4/100 is going to have these devices. */
	if (cpuinfo.cpu_type != CPUTYP_4_100)
		return (0);

	if (uoba->uoba_isobio4 == 0)
		return (0);

	/* Make sure there is something there... */
	oba = &uoba->uoba_oba4;
	return (bus_space_probe(oba->oba_bustag, oba->oba_paddr,
				1,	/* probe size */
				1,	/* offset */
				0,	/* flags */
				NULL, NULL));
}

static void
sw_attach(device_t parent, device_t self, void *aux)
{
	struct sw_softc *sc = device_private(self);
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba = &uoba->uoba_oba4;
	bus_space_handle_t bh;
	char bits[64];
	int i;

	ncr_sc->sc_dev = self;
	sc->sc_dmatag = oba->oba_dmatag;

	/* Map the controller registers. */
	if (bus_space_map(oba->oba_bustag, oba->oba_paddr,
			  SWREG_BANK_SZ,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		aprint_error(": cannot map registers\n");
		return;
	}

	ncr_sc->sc_regt = oba->oba_bustag;
	ncr_sc->sc_regh = bh;

	sc->sc_options = sw_options;

	ncr_sc->sc_dma_setup = sw_dma_setup;
	ncr_sc->sc_dma_start = sw_dma_start;
	ncr_sc->sc_dma_eop   = sw_dma_stop;
	ncr_sc->sc_dma_stop  = sw_dma_stop;
	ncr_sc->sc_intr_on   = sw_intr_on;
	ncr_sc->sc_intr_off  = sw_intr_off;

	/*
	 * Establish interrupt channel.
	 * The default interrupt priority is always 3.  At least, that's
	 * what my board seems to be at.  --thorpej
	 */
	if (oba->oba_pri == -1)
		oba->oba_pri = 3;

	(void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO,
				 sw_intr, sc);

	aprint_normal(" pri %d\n", oba->oba_pri);


	/*
	 * Pull in the options flags.  Allow the user to completely
	 * override the default values.
	 */
	if ((device_cfdata(self)->cf_flags & SW_OPTIONS_MASK) != 0)
		sc->sc_options =
		    device_cfdata(self)->cf_flags & SW_OPTIONS_MASK;

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in =  ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = sw_dma_alloc;
	ncr_sc->sc_dma_free  = sw_dma_free;
	ncr_sc->sc_dma_poll  = sw_dma_poll;

	ncr_sc->sc_flags = 0;
	if ((sc->sc_options & SW_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	if ((sc->sc_options & SW_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;


	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
	sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("sw: DMA handle malloc failed");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (bus_dmamap_create(
				sc->sc_dmatag,	/* tag */
				MAXPHYS,	/* size */
				1,		/* nsegments */
				MAXPHYS,	/* maxsegsz */
				0,		/* boundary */
				BUS_DMA_NOWAIT,
				&sc->sc_dma[i].dh_dmamap) != 0) {

			aprint_error_dev(self, "DMA buffer map create error\n");
			return;
		}
	}

	if (sc->sc_options) {
		aprint_normal_dev(self, "options=%s\n",
		    bitmask_snprintf(sc->sc_options, SW_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}

	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = sw_minphys;

	/* Initialize sw board */
	sw_reset_adapter(ncr_sc);

	/* Attach the ncr5380 chip driver */
	ncr5380_attach(ncr_sc);
}

static void
sw_minphys(struct buf *bp)
{

	if (bp->b_bcount > MAX_DMA_LEN) {
#ifdef DEBUG
		if (sw_debug) {
			printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
			Debugger();
		}
#endif
		bp->b_bcount = MAX_DMA_LEN;
	}
	minphys(bp);
}

#define CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
	SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR )
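/*
 * (CSR conditions that indicate the controller wants attention;
 * sw_intr() below tests each of these bits individually.)
 */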

static int
sw_intr(void *arg)
{
	struct sw_softc *sc = arg;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);

	NCR_TRACE("sw_intr: csr=0x%x\n", csr);

	if (csr & SW_CSR_DMA_CONFLICT) {
		dma_error |= SW_CSR_DMA_CONFLICT;
		printf("%s: DMA conflict\n", __func__);
	}
	if (csr & SW_CSR_DMA_BUS_ERR) {
		dma_error |= SW_CSR_DMA_BUS_ERR;
		printf("%s: DMA bus error\n", __func__);
	}
	if (dma_error) {
		if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
			sc->ncr_sc.sc_state |= NCR_ABORTING;
		/* Make sure we will call the main isr. */
		csr |= SW_CSR_DMA_IP;
	}

	if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef DEBUG
		if (!claimed) {
			printf("%s: spurious from SBC\n", __func__);
			if (sw_debug & 4) {
				Debugger();	/* XXX */
			}
		}
#endif
	}

	return claimed;
}


static void
sw_reset_adapter(struct ncr5380_softc *ncr_sc)
{

#ifdef	DEBUG
	if (sw_debug) {
		printf("%s\n", __func__);
	}
#endif

	/*
	 * The reset bits in the CSR are active low.
	 */
	SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);

	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);

	SCI_CLR_INTR(ncr_sc);
}


/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun4, this means mapping the buffer
 * into DVMA space.
 */
void
sw_dma_alloc(struct ncr5380_softc *ncr_sc)
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct sw_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("%s: already have DMA handle", __func__);
#endif

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if ((sc->sc_options & SW_ENABLE_DMA) == 0)
		return;
#endif

	addr = (u_long)ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("%s: misaligned.\n", __func__);
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("%s: xlen=0x%x", __func__, xlen);

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("sw: no free DMA handles.");

found:
	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_addr = (u_char *)addr;
	dh->dh_maplen  = xlen;

	/* Copy the "write" flag for convenience. */
	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
		dh->dh_flags |= SIDH_OUT;

	/*
	 * Double-map the buffer into DVMA space.  If we can't re-map
	 * the buffer, we print a warning and fall back to PIO mode.
	 *
	 * NOTE: it is not safe to sleep here!
	 */
	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
			    (void *)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
		/* Can't remap segment */
		printf("%s: can't remap 0x%lx/0x%x, doing PIO\n",
		    __func__, addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}
	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
			(dh->dh_flags & SIDH_OUT)
				? BUS_DMASYNC_PREWRITE
				: BUS_DMASYNC_PREREAD);

	/* success */
	sr->sr_dma_hand = dh;
}


void
sw_dma_free(struct ncr5380_softc *ncr_sc)
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
	if (dh == NULL)
		panic("%s: no DMA handle", __func__);
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("%s: free while in progress", __func__);

	if (dh->dh_flags & SIDH_BUSY) {
		/* Give back the DVMA space. */
		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
				dh->dh_dvma, dh->dh_maplen,
				(dh->dh_flags & SIDH_OUT)
					? BUS_DMASYNC_POSTWRITE
					: BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}


/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
sw_dma_poll(struct ncr5380_softc *ncr_sc)
{
	struct sci_req *sr = ncr_sc->sc_current;
	int tmo, csr_mask, csr;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
	    SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;

	tmo = 50000;	/* X100 = 5 sec. */
	for (;;) {
		csr = SWREG_READ(ncr_sc, SWREG_CSR);
		if (csr & csr_mask)
			break;
		if (--tmo <= 0) {
			printf("%s: DMA timeout (while polling)\n",
			    device_xname(ncr_sc->sc_dev));
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}

#ifdef	DEBUG
	if (sw_debug) {
		printf("%s: done, csr=0x%x\n", __func__, csr);
	}
#endif
}


/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 *
 * XXX THIS MIGHT NOT WORK RIGHT!
 */
void
sw_intr_on(struct ncr5380_softc *ncr_sc)
{
	uint32_t csr;

	sw_dma_setup(ncr_sc);
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr |= SW_CSR_DMA_EN;	/* XXX - this bit is for vme only?! */
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 *
 * XXX THIS MIGHT NOT WORK RIGHT!
 */
void
sw_intr_off(struct ncr5380_softc *ncr_sc)
{
	uint32_t csr;

	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}


/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to setup the DMA engine before the bus enters a DATA phase.
 *
 * On the OBIO version we just clear the DMA count and address
 * here (to make sure it stays idle) and do the real setup
 * later, in dma_start.
 */
void
sw_dma_setup(struct ncr5380_softc *ncr_sc)
{
	uint32_t csr;

	/* No FIFO to reset on "sw". */

	/* Set direction (assume recv here) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_SEND;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
}


void
sw_dma_start(struct ncr5380_softc *ncr_sc)
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	u_long dva;
	int xlen, adj, adjlen;
	u_int mode;
	uint32_t csr;

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("%s: bad dva=0x%lx", __func__, dva);

	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef	DEBUG
	if (sw_debug & 2) {
		printf("%s: dh=%p, dva=0x%lx, xlen=%d\n",
		    __func__, dh, dva, xlen);
	}
#endif

	/*
	 * Set up the DMA controller.
	 * Note that (dh->dh_len < sc_datalen)
	 */

	/* Set direction (send/recv) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SW_CSR_SEND;
	} else {
		csr &= ~SW_CSR_SEND;
	}
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * The "sw" needs longword aligned transfers.  We
	 * detect a shortword aligned transfer here, and adjust the
	 * DMA transfer by 2 bytes.  These two bytes are read/written
	 * in PIO mode just before the DMA is started.
	 */
	adj = 0;
	if (dva & 2) {
		adj = 2;
#ifdef DEBUG
		if (sw_debug & 2)
			printf("%s: adjusted up %d bytes\n", __func__, adj);
#endif
	}

	/* We have to frob the address on the "sw". */
	dh->dh_startingpa = (dva | 0xF00000);
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		if (adj) {
			adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad outgoing adj, %d != %d\n",
				    device_xname(ncr_sc->sc_dev), adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_dma_send, 0); 	/* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		if (adj) {
			adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad incoming adj, %d != %d\n",
				    device_xname(ncr_sc->sc_dev), adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_irecv, 0); 	/* start it */
	}

	/* Let'er rip! */
	csr |= SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef	DEBUG
	if (sw_debug & 2) {
		printf("%s: started, flags=0x%x\n",
		    __func__, ncr_sc->sc_state);
	}
#endif
}


void
sw_dma_eop(struct ncr5380_softc *ncr_sc)
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}

#if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
#define COUNT_SW_LEFTOVERS
#endif
#ifdef COUNT_SW_LEFTOVERS
/*
 * Let's find out how often these occur.  Read these with DDB from time
 * to time.
 */
int	sw_3_leftover = 0;
int	sw_2_leftover = 0;
int	sw_1_leftover = 0;
int	sw_0_leftover = 0;
#endif

void
sw_dma_stop(struct ncr5380_softc *ncr_sc)
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	int ntrans = 0, dva;
	u_int mode;
	uint32_t csr;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef	DEBUG
		printf("%s: DMA not running\n", __func__);
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * XXX HARDWARE BUG!
	 * Apparently, some early 4/100 SCSI controllers had a hardware
	 * bug that caused the controller to do illegal memory access.
	 * We see this as SW_CSR_DMA_BUS_ERR (makes sense).  To work around
	 * this, we simply need to clean up after ourselves ... there will
	 * be as many as 3 bytes left over.  Since we clean up "left-over"
	 * bytes on every read anyway, we just continue to chug along
	 * if SW_CSR_DMA_BUS_ERR is asserted.  (This was probably worked
	 * around in hardware later with the "left-over byte" indicator
	 * in the VME controller.)
	 */
#if 0
	if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR))
#else
	if (csr & (SW_CSR_DMA_CONFLICT))
#endif
	{
		printf("sw: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		sw_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much was actually transferred.
	 *
	 * The "sw" doesn't have a FIFO or a bcr, so we store the
	 * starting PA of the transfer in the DMA handle and subtract
	 * it from the ending PA left in the dma_addr register.
	 */
	dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
	ntrans = (dva - dh->dh_startingpa);

#ifdef	DEBUG
	if (sw_debug & 2) {
		printf("%s: ntrans=0x%x\n", __func__, ntrans);
	}
#endif

	if (ntrans > ncr_sc->sc_datalen)
		panic("%s: excess transfer", __func__);

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

	/*
	 * After a read, we may need to clean up
	 * "Left-over bytes"  (yuck!)  The "sw" doesn't
	 * have a "left-over" indicator, so we have to do
	 * this no matter what.  Ick.
	 */
	if ((dh->dh_flags & SIDH_OUT) == 0) {
		char *cp = ncr_sc->sc_dataptr;
		uint32_t bpr;

		bpr = SWREG_READ(ncr_sc, SWREG_BPR);

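		/*
		 * (The low two bits of the ending DMA address give the
		 * number of bytes of the final longword that never made
		 * it to memory; they are presumably still sitting in the
		 * byte-pack register (SWREG_BPR) read above, so copy
		 * them out by hand.)
		 */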
		switch (dva & 3) {
		case 3:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
			cp[2] = (bpr & 0x0000ff00) >> 8;
#ifdef COUNT_SW_LEFTOVERS
			++sw_3_leftover;
#endif
			break;

		case 2:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
#ifdef COUNT_SW_LEFTOVERS
			++sw_2_leftover;
#endif
			break;

		case 1:
			cp[0] = (bpr & 0xff000000) >> 24;
#ifdef COUNT_SW_LEFTOVERS
			++sw_1_leftover;
#endif
			break;

#ifdef COUNT_SW_LEFTOVERS
		default:
			++sw_0_leftover;
			break;
#endif
		}
	}

 out:
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);

	/* Put SBIC back in PIO mode. */
	mode = NCR5380_READ(ncr_sc, sci_mode);
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("%s: ntrans=0x%x\n", __func__, ntrans);
	}
#endif
}
963