/*	$NetBSD: si.c,v 1.20 2008/04/04 16:00:58 tsutsui Exp $	*/

/*-
 * Copyright (c) 1996,2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Glass, David Jones, Gordon W. Ross, Jason R. Thorpe and
 * Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the VME bus-dependent portions of the `si' SCSI adapter.
 * This hardware is frequently found on Sun 3 and Sun 4 machines.
 *
 * The SCSI machinery on this adapter is implemented by an NCR5380,
 * which is taken care of by the chipset driver in /sys/dev/ic/ncr5380sbc.c
 *
 * The logic has a bit to enable or disable the DMA engine,
 * but that bit also gates the interrupt line from the NCR5380!
 * Therefore, in order to get any interrupt from the 5380 (i.e.
 * for reselect), one must clear the DMA engine transfer count and
 * then enable DMA.  This has the further complication that you
 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
 * we have to turn DMA back off before we even look at the 5380.
 *
 * What wonderfully whacky hardware this is!
 *
 */
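
/*
 * To make the ordering constraint above concrete, the enable/disable
 * sequence used by this driver (see si_intr_on() and si_intr_off()
 * below) looks roughly like the compiled-out sketch that follows.
 * This is only an illustration of the ordering rules; the real code
 * also clears the DMA address registers and selects receive mode for
 * safety.  The si_example_* names are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void
si_example_allow_reselect_interrupts(struct ncr5380_softc *ncr_sc)
{
	uint16_t csr;

	/* The transfer count must be cleared before DMA is enabled... */
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	/* ...because the DMA enable bit is what un-gates the 5380 interrupt. */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr | SI_CSR_DMA_EN);
}

static void
si_example_prepare_to_touch_5380(struct ncr5380_softc *ncr_sc)
{
	uint16_t csr;

	/* DMA must be disabled again before any NCR5380 register access. */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr & ~SI_CSR_DMA_EN);
}
#endif	/* illustrative sketch */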

/*
 * This driver originated as an MD implementation for the sun3 and sun4
 * ports. The notes pertaining to that history are included below.
 *
 * David Jones wrote the initial version of this module for NetBSD/sun3,
 * which included support for the VME adapter only (no reselection).
 *
 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
 * both the VME and OBIO code to support disconnect/reselect.
 * (Required figuring out the hardware "features" noted above.)
 *
 * The autoconfiguration boilerplate came from Adam Glass.
 *
 * Jason R. Thorpe ported the autoconfiguration and VME portions to
 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
 * a wacky OBIO variant of the VME SCSI-3.  Many thanks to Chuck Cranor
 * for lots of helpful tips and suggestions.  Thanks also to Paul Kranenburg
 * and Chris Torek for bits of insight needed along the way.  Thanks to
 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
 * for the sake of testing.  Andrew Gillham helped work out the bugs
 * in the 4/100 DMA code.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: si.c,v 1.20 2008/04/04 16:00:58 tsutsui Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/buf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#ifndef Debugger
#define	Debugger()
#endif

#ifndef DEBUG
#define DEBUG XXX
#endif

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include <dev/vme/sireg.h>

/*
 * Transfers smaller than this are done using PIO
 * (on assumption they're not worth DMA overhead)
 */
#define	MIN_DMA_LEN 128
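
/*
 * For example, a typical 36-byte INQUIRY or a short REQUEST SENSE
 * transfer falls below this threshold and is done with PIO, while
 * 512-byte (and larger) block transfers go through the DMA engine
 * (when DMA is enabled at all; see the option flags below).
 */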

#ifdef	DEBUG
int si_debug = 0;
#endif

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct si_dma_handle {
	int 		dh_flags;
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	int 		dh_maplen;	/* Original data length */
	bus_dmamap_t	dh_dmamap;
#define dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct si_softc {
	struct ncr5380_softc	ncr_sc;
	bus_space_tag_t		sc_bustag;	/* bus tags */
	bus_dma_tag_t		sc_dmatag;
	vme_chipset_tag_t	sc_vctag;

	int		sc_adapter_iv_am; /* int. vec + address modifier */
	struct si_dma_handle *sc_dma;
	int		sc_xlen;	/* length of current DMA segment. */
	int		sc_options;	/* options for this instance. */
};

/*
 * Options.  By default, DMA is enabled and DMA completion interrupts
 * and reselect are disabled.  You may enable additional features
 * with the `flags' directive in your kernel's configuration file
 * (see the example below the definitions).
 *
 * Alternatively, you can patch your kernel with DDB or some other
 * mechanism.  The sc_options member of the softc is OR'd with
 * the value in si_options.
 *
 * Note, there's a separate sw_options to make life easier.
 */
#define	SI_ENABLE_DMA	0x01	/* Use DMA (maybe polled) */
#define	SI_DMA_INTR	0x02	/* DMA completion interrupts */
#define	SI_DO_RESELECT	0x04	/* Allow disconnect/reselect */
#define	SI_OPTIONS_MASK	(SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT)
#define SI_OPTIONS_BITS	"\10\3RESELECT\2DMA_INTR\1DMA"
int si_options = SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT;
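/*
 * Example (illustrative only; the locator values vary by machine and
 * are not taken from any particular config file): a kernel config line
 * such as
 *
 *	si0 at vme0 addr 0x200000 irq 3 vect 0x40 flags 0x07
 *
 * would set cf_flags to SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT and
 * completely replace si_options for that instance.  When sc_options is
 * non-zero, attach reports the enabled option bits by name (RESELECT,
 * DMA_INTR, DMA) as decoded from SI_OPTIONS_BITS by bitmask_snprintf().
 */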

static int	si_match(device_t, cfdata_t, void *);
static void	si_attach(device_t, device_t, void *);
static int	si_intr(void *);
static void	si_reset_adapter(struct ncr5380_softc *);

void	si_dma_alloc(struct ncr5380_softc *);
void	si_dma_free(struct ncr5380_softc *);
void	si_dma_poll(struct ncr5380_softc *);

void	si_dma_setup(struct ncr5380_softc *);
void	si_dma_start(struct ncr5380_softc *);
void	si_dma_eop(struct ncr5380_softc *);
void	si_dma_stop(struct ncr5380_softc *);

void	si_intr_on (struct ncr5380_softc *);
void	si_intr_off(struct ncr5380_softc *);

/*
 * Shorthand bus space access
 * XXX - must look into endian issues here.
 */
#define SIREG_READ(sc, index) \
	bus_space_read_2((sc)->sc_regt, (sc)->sc_regh, index)
#define SIREG_WRITE(sc, index, v) \
	bus_space_write_2((sc)->sc_regt, (sc)->sc_regh, index, v)

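/*
 * For example, SIREG_READ(ncr_sc, SIREG_CSR) expands to
 * bus_space_read_2(ncr_sc->sc_regt, ncr_sc->sc_regh, SIREG_CSR),
 * i.e. a 16-bit read of the board's control/status register.
 */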

/* Auto-configuration glue. */
CFATTACH_DECL_NEW(si, sizeof(struct si_softc),
    si_match, si_attach, NULL, NULL);

static int
si_match(device_t parent, cfdata_t cf, void *aux)
{
	struct vme_attach_args	*va = aux;
	vme_chipset_tag_t	ct = va->va_vct;
	vme_am_t		mod;
	vme_addr_t		vme_addr;

	/* Make sure there is something there... */
	mod = VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
	vme_addr = va->r[0].offset;

	if (vme_probe(ct, vme_addr, 1, mod, VME_D8, NULL, 0) != 0)
		return 0;

	/*
	 * If this is a VME SCSI board, we have to determine whether
	 * it is an "sc" (Sun2) or "si" (Sun3) SCSI board.  This can
	 * be determined using the fact that the "sc" board occupies
	 * 4K bytes in VME space but the "si" board occupies 2K bytes.
	 */
	return vme_probe(ct, vme_addr + 0x801, 1, mod, VME_D8, NULL, 0) != 0;
}

static void
si_attach(device_t parent, device_t self, void *aux)
{
	struct si_softc		*sc = device_private(self);
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct vme_attach_args	*va = aux;
	vme_chipset_tag_t	ct = va->va_vct;
	bus_space_tag_t		bt;
	bus_space_handle_t	bh;
	vme_mapresc_t resc;
	vme_intr_handle_t	ih;
	vme_am_t		mod;
	char bits[64];
	int i;

	ncr_sc->sc_dev = self;
	sc->sc_dmatag = va->va_bdt;
	sc->sc_vctag = ct;

	mod = VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;

	if (vme_space_map(ct, va->r[0].offset, SIREG_BANK_SZ,
			  mod, VME_D8, 0, &bt, &bh, &resc) != 0)
		panic("%s: vme_space_map", device_xname(self));

	ncr_sc->sc_regt = bt;
	ncr_sc->sc_regh = bh;

	sc->sc_options = si_options;

	ncr_sc->sc_dma_setup = si_dma_setup;
	ncr_sc->sc_dma_start = si_dma_start;
	ncr_sc->sc_dma_eop   = si_dma_stop;
	ncr_sc->sc_dma_stop  = si_dma_stop;

	vme_intr_map(ct, va->ilevel, va->ivector, &ih);
	vme_intr_establish(ct, ih, IPL_BIO, si_intr, sc);

	aprint_normal("\n");

	sc->sc_adapter_iv_am = (mod << 8) | (va->ivector & 0xFF);

	/*
	 * Pull in the options flags.  Allow the user to completely
	 * override the default values.
	 */
	if ((device_cfdata(self)->cf_flags & SI_OPTIONS_MASK) != 0)
		sc->sc_options =
		    device_cfdata(self)->cf_flags & SI_OPTIONS_MASK;

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in =  ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = si_dma_alloc;
	ncr_sc->sc_dma_free  = si_dma_free;
	ncr_sc->sc_dma_poll  = si_dma_poll;

	ncr_sc->sc_flags = 0;
	if ((sc->sc_options & SI_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	if ((sc->sc_options & SI_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct si_dma_handle);
	sc->sc_dma = malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("si: DMA handle malloc failed");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (vme_dmamap_create(
				sc->sc_vctag,	/* VME chip tag */
				MAXPHYS,	/* size */
				VME_AM_A24,	/* address modifier */
				VME_D16,	/* data size */
				0,		/* swap */
				1,		/* nsegments */
				MAXPHYS,	/* maxsegsz */
				0,		/* boundary */
				BUS_DMA_NOWAIT,
				&sc->sc_dma[i].dh_dmamap) != 0) {

			aprint_error_dev(self, "DMA buffer map create error\n");
			return;
		}
	}

	if (sc->sc_options) {
		aprint_normal_dev(self, "options=%s\n",
		    bitmask_snprintf(sc->sc_options, SI_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}

	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = minphys;

	/*
	 * Initialize the si board itself.
	 */
	si_reset_adapter(ncr_sc);
	ncr5380_attach(ncr_sc);

	if (sc->sc_options & SI_DO_RESELECT) {
		/*
		 * Need to enable interrupts (and DMA!)
		 * on this H/W for reselect to work.
		 */
		ncr_sc->sc_intr_on   = si_intr_on;
		ncr_sc->sc_intr_off  = si_intr_off;
	}
}

#define CSR_WANT (SI_CSR_SBC_IP | SI_CSR_DMA_IP | \
	SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR )

static int
si_intr(void *arg)
{
	struct si_softc *sc = arg;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	int dma_error, claimed;
	uint16_t csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	NCR_TRACE("si_intr: csr=0x%x\n", csr);

	if (csr & SI_CSR_DMA_CONFLICT) {
		dma_error |= SI_CSR_DMA_CONFLICT;
		printf("%s: DMA conflict\n", __func__);
	}
	if (csr & SI_CSR_DMA_BUS_ERR) {
		dma_error |= SI_CSR_DMA_BUS_ERR;
		printf("%s: DMA bus error\n", __func__);
	}
	if (dma_error) {
		if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
			sc->ncr_sc.sc_state |= NCR_ABORTING;
		/* Make sure we will call the main isr. */
		csr |= SI_CSR_DMA_IP;
	}

	if (csr & (SI_CSR_SBC_IP | SI_CSR_DMA_IP)) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef DEBUG
		if (!claimed) {
			printf("%s: spurious from SBC\n", __func__);
			if (si_debug & 4) {
				Debugger();	/* XXX */
			}
		}
#endif
	}

	return claimed;
}


static void
si_reset_adapter(struct ncr5380_softc *ncr_sc)
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;

#ifdef	DEBUG
	if (si_debug) {
		printf("%s\n", __func__);
	}
#endif

	/*
	 * The SCSI3 controller has an 8K FIFO to buffer data between the
	 * 5380 and the DMA.  Make sure it starts out empty.
	 *
	 * The reset bits in the CSR are active low.
	 */
	SIREG_WRITE(ncr_sc, SIREG_CSR, 0);
	delay(10);
	SIREG_WRITE(ncr_sc, SIREG_CSR,
	    SI_CSR_FIFO_RES | SI_CSR_SCSI_RES | SI_CSR_INTR_EN);
	delay(10);

	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);
	SIREG_WRITE(ncr_sc, SIREG_IV_AM, sc->sc_adapter_iv_am);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);

	SCI_CLR_INTR(ncr_sc);
}

/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.
 */
void
si_dma_alloc(struct ncr5380_softc *ncr_sc)
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct si_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("%s: already have DMA handle", __func__);
#endif

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if ((sc->sc_options & SI_ENABLE_DMA) == 0)
		return;
#endif

	addr = (u_long)ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("%s: misaligned.\n", __func__);
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("%s: xlen=0x%x", __func__, xlen);

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("si: no free DMA handles.");

found:
	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_maplen  = xlen;

	/* Copy the "write" flag for convenience. */
	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
		dh->dh_flags |= SIDH_OUT;

	/*
	 * Double-map the buffer into DVMA space.  If we can't re-map
	 * the buffer, we print a warning and fall back to PIO mode.
	 *
	 * NOTE: it is not safe to sleep here!
	 */
	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
			    (void *)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
		/* Can't remap segment */
		printf("%s: can't remap 0x%lx/0x%x, doing PIO\n",
		    __func__, addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}
	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
			(dh->dh_flags & SIDH_OUT)
				? BUS_DMASYNC_PREWRITE
				: BUS_DMASYNC_PREREAD);

	/* success */
	sr->sr_dma_hand = dh;
}


void
si_dma_free(struct ncr5380_softc *ncr_sc)
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
	if (dh == NULL)
		panic("%s: no DMA handle", __func__);
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("%s: free while in progress", __func__);

	if (dh->dh_flags & SIDH_BUSY) {
		/* Give back the DVMA space. */
		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
				dh->dh_dvma, dh->dh_maplen,
				(dh->dh_flags & SIDH_OUT)
					? BUS_DMASYNC_POSTWRITE
					: BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}


/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
si_dma_poll(struct ncr5380_softc *ncr_sc)
{
	struct sci_req *sr = ncr_sc->sc_current;
	int tmo, csr_mask, csr;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	csr_mask = SI_CSR_SBC_IP | SI_CSR_DMA_IP |
		SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR;

	tmo = 50000;	/* X100 = 5 sec. */
	for (;;) {
		csr = SIREG_READ(ncr_sc, SIREG_CSR);
		if (csr & csr_mask)
			break;
		if (--tmo <= 0) {
			printf("%s: DMA timeout (while polling)\n",
			    device_xname(ncr_sc->sc_dev));
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}

#ifdef	DEBUG
	if (si_debug) {
		printf("%s: done, csr=0x%x\n", __func__, csr);
	}
#endif
}


/*****************************************************************
 * VME functions for DMA
 ****************************************************************/


/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
si_intr_on(struct ncr5380_softc *ncr_sc)
{
	uint16_t csr;

	/* Clear DMA start address and counters */
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	/* Enter receive mode (for safety) and enable DMA engine */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	csr &= ~SI_CSR_SEND;
	csr |= SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 */
void
si_intr_off(struct ncr5380_softc *ncr_sc)
{
	uint16_t csr;

	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	csr &= ~SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
}

/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to set up the DMA engine before the bus enters a DATA phase.
 *
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 *
 * On the VME version we just clear the DMA count and address
 * here (to make sure it stays idle) and do the real setup
 * later, in dma_start.
 */
void
si_dma_setup(struct ncr5380_softc *ncr_sc)
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	uint16_t csr;
	u_long dva;
	int xlen;

	/*
	 * Set up the DMA controller.
	 * Note that (dh->dh_maplen <= sc_datalen)
	 */

	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	/* Disable DMA while we're setting up the transfer */
	csr &= ~SI_CSR_DMA_EN;

	/* Reset the FIFO */
	csr &= ~SI_CSR_FIFO_RES;		/* active low */
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
	csr |= SI_CSR_FIFO_RES;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("%s: bad dmaaddr=0x%lx", __func__, dva);
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef	DEBUG
	if (si_debug & 2) {
		printf("%s: dh=%p, dmaaddr=0x%lx, xlen=%d\n",
		    __func__, dh, dva, xlen);
	}
#endif
	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SI_CSR_SEND;
	} else {
		csr &= ~SI_CSR_SEND;
	}

	/* Set byte-packing control */
	if (dva & 2) {
		csr |= SI_CSR_BPCON;
	} else {
		csr &= ~SI_CSR_BPCON;
	}

	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	/* Load start address */
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, (uint16_t)(dva >> 16));
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, (uint16_t)(dva & 0xFFFF));

	/* Clear DMA counters; these will be set in si_dma_start() */
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	/* Clear FIFO counter. (also hits dma_count) */
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);
}


void
si_dma_start(struct ncr5380_softc *ncr_sc)
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	int xlen;
	u_int mode;
	uint16_t csr;

	xlen = sc->sc_xlen;

	/* Load transfer length */
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, (uint16_t)(xlen >> 16));
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, (uint16_t)(xlen & 0xFFFF));
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, (uint16_t)(xlen >> 16));
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, (uint16_t)(xlen & 0xFFFF));

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);

		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);

		NCR5380_WRITE(ncr_sc, sci_dma_send, 0); /* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);

		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);

		NCR5380_WRITE(ncr_sc, sci_irecv, 0); /* start it */
	}

	ncr_sc->sc_state |= NCR_DOINGDMA;

	/* Enable DMA engine */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	csr |= SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

#ifdef	DEBUG
	if (si_debug & 2) {
		printf("%s: started, flags=0x%x\n",
		    __func__, ncr_sc->sc_state);
	}
#endif
}


void
si_dma_eop(struct ncr5380_softc *ncr_sc)
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}


void
si_dma_stop(struct ncr5380_softc *ncr_sc)
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	int resid, ntrans;
	uint16_t csr;
	u_int mode;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef	DEBUG
		printf("%s: DMA not running\n", __func__);
#endif
		return;
	}

	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	/* First, halt the DMA engine. */
	csr &= ~SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	if (csr & (SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR)) {
		printf("si: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		si_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much was actually transferred.
	 *
	 * On VME, the fifo_count does not reflect how many bytes
	 * were actually transferred.
	 *
	 * The SCSI-3 VME interface is a little funny on writes:
	 * if we have a disconnect, the DMA has overshot by
	 * one byte and the resid needs to be incremented.
	 * This only happens for partial transfers.
	 * (Thanks to Matt Jacob)
	 */

	resid = SIREG_READ(ncr_sc, SIREG_FIFO_CNTH) << 16;
	resid |= SIREG_READ(ncr_sc, SIREG_FIFO_CNT) & 0xFFFF;
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_xlen))
			resid++;
	ntrans = sc->sc_xlen - resid;

#ifdef	DEBUG
	if (si_debug & 2) {
		printf("%s: resid=0x%x ntrans=0x%x\n",
		    __func__, resid, ntrans);
	}
#endif

	if (ntrans > ncr_sc->sc_datalen)
		panic("%s: excess transfer", __func__);

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

#ifdef	DEBUG
	if (si_debug & 2) {
		printf("%s: ntrans=0x%x\n", __func__, ntrans);
	}
#endif

	/*
	 * After a read, we may need to clean up
	 * "left-over bytes" (yuck!)
	 */
	if (((dh->dh_flags & SIDH_OUT) == 0) &&
		((csr & SI_CSR_LOB) != 0)) {
		uint8_t *cp = ncr_sc->sc_dataptr;
		uint16_t bprh, bprl;

		bprh = SIREG_READ(ncr_sc, SIREG_BPRH);
		bprl = SIREG_READ(ncr_sc, SIREG_BPRL);

#ifdef DEBUG
		printf("si: got left-over bytes: bprh=%x, bprl=%x, csr=%x\n",
			bprh, bprl, csr);
#endif

		if (csr & SI_CSR_BPCON) {
			/* have SI_CSR_BPCON */
			cp[-1] = (bprl & 0xff00) >> 8;
		} else {
			switch (csr & SI_CSR_LOB) {
			case SI_CSR_LOB_THREE:
				cp[-3] = (bprh & 0xff00) >> 8;
				cp[-2] = (bprh & 0x00ff);
				cp[-1] = (bprl & 0xff00) >> 8;
				break;
			case SI_CSR_LOB_TWO:
				cp[-2] = (bprh & 0xff00) >> 8;
				cp[-1] = (bprh & 0x00ff);
				break;
			case SI_CSR_LOB_ONE:
				cp[-1] = (bprh & 0xff00) >> 8;
				break;
			}
		}
	}

out:
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);

	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);

	mode = NCR5380_READ(ncr_sc, sci_mode);
	/* Put SBIC back in PIO mode. */
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);
}
930