/*	$NetBSD: ncr.c,v 1.8 1997/02/26 22:29:12 gwr Exp $	*/

/* #define DEBUG	/* */
/* #define TRACE	/* */
/* #define POLL_MODE	/* */
#define USE_VMAPBUF

/*
 * Copyright (c) 1995 David Jones, Gordon W. Ross
 * Copyright (c) 1994 Adam Glass
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by
 *	Adam Glass, David Jones, and Gordon Ross
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains only the machine-dependent parts of the
 * Sun3 SCSI driver.  (Autoconfig stuff and DMA functions.)
 * The machine-independent parts are in ncr5380sbc.c
 *
 * Supported hardware includes:
 * Sun SCSI-3 on OBIO (Sun3/50,Sun3/60)
 * Sun SCSI-3 on VME (Sun3/160,Sun3/260)
 *
 * Could be made to support the Sun3/E if someone wanted to.
 *
 * Note:  Both supported variants of the Sun SCSI-3 adapter have
 * some really unusual "features" for this driver to deal with,
 * generally related to the DMA engine.	 The OBIO variant will
 * ignore any attempt to write the FIFO count register while the
 * SCSI bus is in DATA_IN or DATA_OUT phase.  This is dealt with
 * by setting the FIFO count early in COMMAND or MSG_IN phase.
 *
 * The VME variant has a bit to enable or disable the DMA engine,
 * but that bit also gates the interrupt line from the NCR5380!
 * Therefore, in order to get any interrupt from the 5380, (i.e.
 * for reselect) one must clear the DMA engine transfer count and
 * then enable DMA.  This has the further complication that you
 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
 * we have to turn DMA back off before we even look at the 5380.
 *
 * What wonderfully whacky hardware this is!
 *
 * Credits, history:
 *
 * David Jones wrote the initial version of this module, which
 * included support for the VME adapter only. (no reselection).
 *
 * Gordon Ross added support for the OBIO adapter, and re-worked
 * both the VME and OBIO code to support disconnect/reselect.
 * (Required figuring out the hardware "features" noted above.)
 *
 * The autoconfiguration boilerplate came from Adam Glass.
 *
 * VS2000:
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/map.h>
#include <sys/malloc.h>		/* for the kernel malloc() used in si_attach() */
#include <sys/device.h>
#include <sys/dkstat.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/syslog.h>

/* #include <sys/errno.h> */

#include <scsi/scsi_all.h>
#include <scsi/scsi_debug.h>
#include <scsi/scsiconf.h>

#include <machine/uvax.h>
#include <machine/ka410.h>
#include <machine/ka43.h>
#include <machine/vsbus.h>	/* struct confargs */

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#define trace(x)
#define debug(x)

#ifndef NCR5380_CSRBITS
#define NCR5380_CSRBITS \
	"\020\010DEND\007DREQ\006PERR\005IREQ\004MTCH\003DCON\002ATN\001ACK"
#endif

#ifndef NCR5380_BUSCSRBITS
#define NCR5380_BUSCSRBITS \
	"\020\010RST\007BSY\006REQ\005MSG\004C/D\003I/O\002SEL\001DBP"
#endif

#include "ncr.h"

#ifdef DDB
#define integrate
#else
#define integrate static
#endif

/*
 * Transfers smaller than this are done using PIO
 * (on assumption they're not worth DMA overhead)
 */
#define MIN_DMA_LEN 128
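/*
 * Note: transfers shorter than sc_min_dma_len (set to MIN_DMA_LEN in
 * si_attach() below) are presumably handled by the MI ncr5380sbc code
 * via ncr5380_pio_in()/ncr5380_pio_out() rather than through DMA.
 */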

/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems.	 (paranoid?)
 *
 * bertram: the VS2000 has a DMA area of only 16KB, thus
 * the maximum DMA size is 16KB...
 */
#ifdef DMA_SHARED
#define MAX_DMA_LEN	0x2000		/* (8 * 1024) */
#define DMA_ADDR_HBYTE	0x20
#define DMA_ADDR_LBYTE	0x00
#else
#define MAX_DMA_LEN	0x4000		/* (16 * 1024) */
#define DMA_ADDR_HBYTE	0x00
#define DMA_ADDR_LBYTE	0x00
#endif
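
/*
 * DMA_ADDR_HBYTE/DMA_ADDR_LBYTE are the two bytes written to the DMA
 * address register in si_dma_start() to select the start of the bounce
 * buffer within the fixed DMA area; every transfer starts at that offset.
 */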

#ifdef	DEBUG
int si_debug = 3;
static int si_link_flags = 0 /* | SDEV_DB2 */ ;
#endif

/*
 * This structure is used to keep track of mapped DMA requests.
 * Note: combined the UDC command block with this structure, so
 * the array of these has to be in DVMA space.
 */
struct si_dma_handle {
	int		dh_flags;
#define SIDH_BUSY	1		/* This DH is in use */
#define SIDH_OUT	2		/* DMA does data out (write) */
#define SIDH_PHYS	4
#define SIDH_DONE	8
	u_char *	dh_addr;	/* KVA of start of buffer */
	int		dh_maplen;	/* Length of KVA mapping. */
	u_char *	dh_dvma;	/* VA of buffer in DVMA space */
	int		dh_xlen;
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct si_softc {
	struct ncr5380_softc	ncr_sc;
	volatile struct si_regs *sc_regs;	/* do we really need this? */

	struct si_dma_handle	*sc_dma;
	struct confargs		*sc_cfargs;

	int	sc_xflags;	/* ka410/ka43: resid, sizeof(areg) */

	char	*sc_dbase;
	int	sc_dsize;

	volatile char	*sc_dareg;
	volatile short	*sc_dcreg;
	volatile char	*sc_ddreg;
	volatile int	sc_dflags;

#define VSDMA_LOCKED	0x80	/* */
#define VSDMA_WANTED	0x40	/* */
#define VSDMA_IWANTED	0x20
#define VSDMA_BLOCKED	0x10
#define VSDMA_DMABUSY	0x08	/* DMA in progress */
#define VSDMA_REGBUSY	0x04	/* accessing registers */
#define VSDMA_WRBUF	0x02	/* writing to bounce-buffer */
#define VSDMA_RDBUF	0x01	/* reading from bounce-buffer */

#define VSDMA_STATUS	0xF0
#define VSDMA_LCKTYPE	0x0F
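
/*
 * Layout of sc_dflags: the high nibble (VSDMA_STATUS) holds state bits
 * (LOCKED/WANTED/IWANTED/BLOCKED), the low nibble (VSDMA_LCKTYPE) holds
 * the kind of lock currently held (DMABUSY/REGBUSY/WRBUF/RDBUF).
 * See si_dmaLockBus()/si_dmaToggleLock()/si_dmaReleaseBus() below.
 */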

#ifdef POLL_MODE
	volatile u_char *intreq;
	volatile u_char *intclr;
	volatile u_char *intmsk;
	volatile int	intbit;
#endif
};

extern int cold;	/* enable polling while cold-flag set */

/* Options.  Interesting values are: 1,3,7 */
int si_options = 3;	/* bertram: 3 or 7 ??? */
#define SI_ENABLE_DMA	1	/* Use DMA (maybe polled) */
#define SI_DMA_INTR	2	/* DMA completion interrupts */
#define SI_DO_RESELECT	4	/* Allow disconnect/reselect */
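/*
 * Example: the default si_options = 3 (SI_ENABLE_DMA | SI_DMA_INTR)
 * enables DMA with completion interrupts but leaves disconnect/reselect
 * turned off; 7 would additionally set SI_DO_RESELECT.
 */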

#define DMA_DIR_IN  1
#define DMA_DIR_OUT 0
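/*
 * DMA_DIR_IN/DMA_DIR_OUT are the values written to the DMA direction
 * register (sc_ddreg) in si_dma_start() before a transfer is kicked off.
 */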

/* How long to wait for DMA before declaring an error. */
int si_dma_intr_timo = 500;	/* ticks (sec. X 100) */

integrate char si_name[] = "ncr";
integrate int	si_match();
integrate void	si_attach();
integrate int	si_intr __P((void *));

integrate void	si_minphys __P((struct buf *bp));
integrate void	si_reset_adapter __P((struct ncr5380_softc *sc));

void si_dma_alloc __P((struct ncr5380_softc *));
void si_dma_free __P((struct ncr5380_softc *));
void si_dma_poll __P((struct ncr5380_softc *));

void si_intr_on __P((struct ncr5380_softc *));
void si_intr_off __P((struct ncr5380_softc *));

int si_dmaLockBus __P((struct ncr5380_softc *, int));
int si_dmaToggleLock __P((struct ncr5380_softc *, int, int));
int si_dmaReleaseBus __P((struct ncr5380_softc *, int));

void si_dma_setup __P((struct ncr5380_softc *));
void si_dma_start __P((struct ncr5380_softc *));
void si_dma_eop __P((struct ncr5380_softc *));
void si_dma_stop __P((struct ncr5380_softc *));

static struct scsi_adapter	si_ops = {
	ncr5380_scsi_cmd,		/* scsi_cmd()		*/
	si_minphys,			/* scsi_minphys()	*/
	NULL,				/* open_target_lu()	*/
	NULL,				/* close_target_lu()	*/
};

/* This is copied from julian's bt driver */
/* "so we have a default dev struct for our link struct." */
static struct scsi_device si_dev = {
	NULL,		/* Use default error handler.	    */
	NULL,		/* Use default start handler.		*/
	NULL,		/* Use default async handler.	    */
	NULL,		/* Use default "done" routine.	    */
};


struct cfdriver ncr_cd = {
	NULL, si_name, DV_DULL
};
struct cfattach ncr_ca = {
	sizeof(struct si_softc), si_match, si_attach,
};

void
dk_establish(p,q)
	struct disk *p;
	struct device *q;
{
#if 0
	printf ("faking dk_establish()...\n");
#endif
}


integrate int
si_match(parent, match, aux)
	struct device	*parent;
	void		*match, *aux;
{
	struct cfdata	*cf = match;
	struct confargs *ca = aux;

	trace(("ncr_match(0x%x, %d, %s)\n", parent, cf->cf_unit, ca->ca_name));

	if (strcmp(ca->ca_name, "ncr") &&
	    strcmp(ca->ca_name, "ncr5380") &&
	    strcmp(ca->ca_name, "NCR5380"))
		return (0);

	/*
	 * we just assume it is there ...
	 */
	return (1);
}

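/*
 * Set the SCSI host id of one of the two ports.  The id register at
 * physical address KA410_SCSIPORT apparently packs both ids into one
 * longword (bits 2-4 for port 0, bits 5-7 for port 1), which is what
 * the anonymous bit-field struct below describes.
 */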
integrate void
si_set_portid(pid,port)
	int pid;
	int port;
{
	struct {
	  u_long    :2;
	  u_long id0:3;
	  u_long id1:3;
	  u_long    :26;
	} *p;

#ifdef DEBUG
	int *ip;
	ip = (void*)uvax_phys2virt(KA410_SCSIPORT);
	p = (void*)uvax_phys2virt(KA410_SCSIPORT);
	printf("scsi-id: (%x/%d) %d / %d\n", *ip, *ip, p->id0, p->id1);
#endif

	p = (void*)uvax_phys2virt(KA410_SCSIPORT);
	switch (port) {
	case 0:
		p->id0 = pid;
		printf(": scsi-id %d\n", p->id0);
		break;
	case 1:
		p->id1 = pid;
		printf(": scsi-id %d\n", p->id1);
		break;
	default:
		printf("invalid port-number %d\n", port);
	}
}

integrate void
si_attach(parent, self, aux)
	struct device	*parent, *self;
	void		*aux;
{
	struct si_softc *sc = (struct si_softc *) self;
	struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)sc;
	volatile struct si_regs *regs;
	struct confargs *ca = aux;
	int i;
	int *ip = aux;

	trace (("ncr_attach(0x%x, 0x%x, %s)\n", parent, self, ca->ca_name));

	/*
	 *
	 */
#ifdef POLL_MODE
	sc->intreq = (void*)uvax_phys2virt(KA410_INTREQ);
	sc->intmsk = (void*)uvax_phys2virt(KA410_INTMSK);
	sc->intclr = (void*)uvax_phys2virt(KA410_INTCLR);
	sc->intbit = ca->ca_intbit;
#endif

	sc->sc_cfargs = ca;	/* needed for interrupt-setup */

	regs = (void*)uvax_phys2virt(ca->ca_ioaddr);

	sc->sc_dareg = (void*)uvax_phys2virt(ca->ca_dareg);
	sc->sc_dcreg = (void*)uvax_phys2virt(ca->ca_dcreg);
	sc->sc_ddreg = (void*)uvax_phys2virt(ca->ca_ddreg);
	sc->sc_dbase = (void*)uvax_phys2virt(ca->ca_dbase);
	sc->sc_dsize = ca->ca_dsize;
	sc->sc_dflags = 4;	/* XXX */
	sc->sc_xflags = ca->ca_dflag;	/* should/will be renamed */
	/*
	 * Fill in the prototype scsi_link.
	 */
	ncr_sc->sc_link.channel = SCSI_CHANNEL_ONLY_ONE;
	ncr_sc->sc_link.adapter_softc = sc;
	ncr_sc->sc_link.adapter_target = ca->ca_idval;
	ncr_sc->sc_link.adapter = &si_ops;
	ncr_sc->sc_link.device = &si_dev;

	si_set_portid(ca->ca_idval, ncr_sc->sc_dev.dv_unit);

	/*
	 * Initialize fields used by the MI code
	 */
	ncr_sc->sci_r0 = (void*)&regs->sci.sci_r0;
	ncr_sc->sci_r1 = (void*)&regs->sci.sci_r1;
	ncr_sc->sci_r2 = (void*)&regs->sci.sci_r2;
	ncr_sc->sci_r3 = (void*)&regs->sci.sci_r3;
	ncr_sc->sci_r4 = (void*)&regs->sci.sci_r4;
	ncr_sc->sci_r5 = (void*)&regs->sci.sci_r5;
	ncr_sc->sci_r6 = (void*)&regs->sci.sci_r6;
	ncr_sc->sci_r7 = (void*)&regs->sci.sci_r7;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in =  ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = si_dma_alloc;
	ncr_sc->sc_dma_free  = si_dma_free;
	ncr_sc->sc_dma_poll  = si_dma_poll;	/* si_dma_poll not used! */
	ncr_sc->sc_intr_on   = si_intr_on;	/* vsbus_unlockDMA; */
	ncr_sc->sc_intr_off  = si_intr_off;	/* vsbus_lockDMA; */

	ncr_sc->sc_dma_setup = NULL;		/* si_dma_setup not used! */
	ncr_sc->sc_dma_start = si_dma_start;
	ncr_sc->sc_dma_eop   = NULL;
	ncr_sc->sc_dma_stop  = si_dma_stop;

	ncr_sc->sc_flags = 0;
	if ((si_options & SI_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xff;
	if ((si_options & SI_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Initialize fields used only here in the MD code.
	 */
	i = SCI_OPENINGS * sizeof(struct si_dma_handle);
	sc->sc_dma = (struct si_dma_handle *) malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("si: malloc failed\n");
	for (i = 0; i < SCI_OPENINGS; i++)
		sc->sc_dma[i].dh_flags = 0;

	sc->sc_regs = regs;

#ifdef	DEBUG
	if (si_debug)
		printf("si: Set TheSoftC=%x TheRegs=%x\n", sc, regs);
	ncr_sc->sc_link.flags |= si_link_flags;
#endif

	/*
	 *  Initialize si board itself.
	 */
	si_reset_adapter(ncr_sc);
	ncr5380_init(ncr_sc);
	ncr5380_reset_scsibus(ncr_sc);
	config_found(self, &(ncr_sc->sc_link), scsiprint);

	/*
	 * Now ready for interrupts.
	 */
	vsbus_intr_register(sc->sc_cfargs, si_intr, (void *)sc);
	vsbus_intr_enable(sc->sc_cfargs);
}

integrate void
si_minphys(struct buf *bp)
{
	debug(("minphys: blkno=%d, bcount=%d, data=0x%x, flags=%x\n",
	      bp->b_blkno, bp->b_bcount, bp->b_data, bp->b_flags));

	if (bp->b_bcount > MAX_DMA_LEN) {
#ifdef	DEBUG
		if (si_debug) {
			printf("si_minphys len = 0x%x.\n", bp->b_bcount);
#ifdef DDB
			Debugger();
#endif
		}
#endif
		bp->b_bcount = MAX_DMA_LEN;
	}
	return (minphys(bp));
}


#define CSR_WANT (SI_CSR_SBC_IP | SI_CSR_DMA_IP | \
	SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR )
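
/*
 * CSR_WANT is inherited from the Sun3 "si" driver; it does not appear
 * to be used anywhere in this file.
 */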

static int si_intrCount = 0;
static int lastCSR = 0;

integrate int
si_intr(arg)
	void *arg;
{
	struct ncr5380_softc *ncr_sc = arg;
	struct si_softc *sc = arg;
	int count, claimed;

	count = ++si_intrCount;
	trace(("%s: si-intr(%d).....\n", ncr_sc->sc_dev.dv_xname, count));

#ifdef DEBUG
	/*
	 * Each DMA interrupt is followed by one spurious(?) interrupt.
	 * If ((ncr_sc->sc_state & NCR_WORKING) == 0) we know that the
	 * interrupt was not claimed by the higher-level routine, so
	 * it might be safe to ignore these...
	 */
	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
		printf("spurious(%d): %x, %d, status=%b\n", count,
		       sc->sc_dflags, ncr_sc->sc_ncmds,
		       *ncr_sc->sci_csr, NCR5380_CSRBITS);
	}
#endif
	/*
	 * If there was a DMA operation in progress, now it's no longer
	 * active, since whatever caused the interrupt also interrupted
	 * the DMA operation. Thus accessing the registers now doesn't
	 * harm anything which is not yet broken...
	 */
	debug(("si_intr(status: %x, dma-count: %d)\n",
	       *ncr_sc->sci_csr, *sc->sc_dcreg));

	/*
	 * First check for DMA errors / incomplete transfers.
	 * If the operation was a read (data-in), copy the data from
	 * the bounce buffer.
	 */
	if (ncr_sc->sc_state & NCR_DOINGDMA) {
		struct sci_req *sr = ncr_sc->sc_current;
		struct si_dma_handle *dh = sr->sr_dma_hand;
		int resid, ntrans;

		resid = *sc->sc_dcreg;
		if (resid == 1 && sc->sc_xflags) {
			debug(("correcting resid...\n"));
			resid = 0;
		}
		ntrans = dh->dh_xlen + resid;
		if (resid == 0) {
			if ((dh->dh_flags & SIDH_OUT) == 0) {
				si_dmaToggleLock(ncr_sc,
						 VSDMA_DMABUSY, VSDMA_RDBUF);
				bcopy(sc->sc_dbase, dh->dh_dvma, ntrans);
				si_dmaToggleLock(ncr_sc,
						 VSDMA_RDBUF, VSDMA_DMABUSY);
				dh->dh_flags |= SIDH_DONE;
			}
		}
		else {
#ifdef DEBUG
			int csr = *ncr_sc->sci_csr;
			printf("DMA incomplete (%d/%d) status = %b\n",
			       ntrans, resid, csr, NCR5380_CSRBITS);
			if(csr != lastCSR) {
				int k = (csr & ~lastCSR) | (~csr & lastCSR);
				debug(("Changed status bits: %b\n",
				       k, NCR5380_CSRBITS));
				lastCSR = csr & 0xFF;
			}
#endif
			printf("DMA incomplete: ntrans=%d/%d, lock=%x\n",
			       ntrans, dh->dh_xlen, sc->sc_dflags);
			ncr_sc->sc_state |= NCR_ABORTING;
		}

		if ((sc->sc_dflags & VSDMA_BLOCKED) == 0) {
			printf("not blocked during DMA.\n");
		}
		sc->sc_dflags &= ~VSDMA_BLOCKED;
		si_dmaReleaseBus(ncr_sc, VSDMA_DMABUSY);
	}
	if ((sc->sc_dflags & VSDMA_BLOCKED) != 0) {
		printf("blocked while not doing DMA.\n");
		sc->sc_dflags &= ~VSDMA_BLOCKED;
	}

	/*
	 * Now, whatever it was, let the ncr5380sbc routine handle it...
	 */
	claimed = ncr5380_intr(ncr_sc);
#ifdef	DEBUG
	if (!claimed) {
		printf("si_intr: spurious from SBC\n");
		if (si_debug & 4) {
			Debugger();	/* XXX */
		}
	}
#endif
	trace(("%s: si-intr(%d) done, claimed=%d\n",
	       ncr_sc->sc_dev.dv_xname, count, claimed));
	return (claimed);
}


integrate void
si_reset_adapter(struct ncr5380_softc *ncr_sc)
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	volatile struct si_regs *si = sc->sc_regs;

#ifdef	DEBUG
	if (si_debug) {
		printf("si_reset_adapter\n");
	}
#endif
	SCI_CLR_INTR(ncr_sc);
}


/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for the DMA transfer.  (On the Sun3 this meant mapping the buffer
 * into DVMA space; on the VS2000 the data is bounced through the
 * fixed DMA area in si_dma_start()/si_intr() instead.)
 */
void
si_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsi_xfer *xs = sr->sr_xs;
	struct buf *bp = sr->sr_xs->bp;
	struct si_dma_handle *dh;
	int i, xlen;
	u_long addr;

	trace (("si_dma_alloc()\n"));

#ifdef	DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("si_dma_alloc: already have DMA handle");
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	debug(("addr=%x, dataptr=%x\n", addr, ncr_sc->sc_dataptr));
	xlen = ncr_sc->sc_datalen;

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("si_dma_alloc: xlen=0x%x\n", xlen);

	/*
	 * Never attempt single transfers of more than MAX_DMA_LEN bytes,
	 * because our count register may be only 16 bits (an OBIO adapter).
	 * This should never happen since it is already bounded by minphys().
	 * XXX - Should just segment these...
	 */
	if (xlen > MAX_DMA_LEN) {
#ifdef DEBUG
		printf("si_dma_alloc: excessive xlen=0x%x\n", xlen);
		Debugger();
#endif
		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
	}

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("si: no free DMA handles.");
found:

	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_addr = (u_char*) addr;
	dh->dh_maplen  = xlen;
	dh->dh_xlen  = xlen;
	dh->dh_dvma = 0;

	/* Copy the "write" flag for convenience. */
	if (xs->flags & SCSI_DATA_OUT)
		dh->dh_flags |= SIDH_OUT;

#if 1
	/*
	 * If the buffer has the flag B_PHYS, then the address specified
	 * in the buffer is a user-space address and we need to remap
	 * this address into kernel space so that using this buffer
	 * within the interrupt routine will work.
	 * If it's already a kernel space address, we need to make sure
	 * that all pages are in core.  The mapin() routine takes care
	 * of that.
	 */
	if (bp && (bp->b_flags & B_PHYS))
		dh->dh_flags |= SIDH_PHYS;
#endif

	if (!bp) {
		printf("ncr.c: struct buf *bp is null-pointer.\n");
		dh->dh_flags = 0;
		return;
	}
	if (bp->b_bcount < 0 || bp->b_bcount > MAX_DMA_LEN) {
		printf("ncr.c: invalid bcount %d (0x%x)\n",
		       bp->b_bcount, bp->b_bcount);
		dh->dh_flags = 0;
		return;
	}
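	/*
	 * On the VS2000 there is no per-request mapping to set up here:
	 * we simply remember the KVA of the data buffer and copy to/from
	 * the fixed bounce buffer (sc_dbase) in si_dma_start() and
	 * si_intr()/si_dma_stop().
	 */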
	dh->dh_dvma = bp->b_data;
#if 0
	/*
	 * Mapping of user-space addresses is no longer necessary, now
	 * that the vmapbuf/vunmapbuf routines exist; the higher-level
	 * driver already takes care of the mapping!
	 */
	if (bp->b_flags & B_PHYS) {
		xdebug(("not mapping in... %x/%x %x\n", bp->b_saveaddr,
			bp->b_data, bp->b_bcount));
#ifdef USE_VMAPBUF
		dh->dh_addr = bp->b_data;
		dh->dh_maplen = bp->b_bcount;
		vmapbuf(bp, bp->b_bcount);
		dh->dh_dvma = bp->b_data;
#else
		dh->dh_dvma = (u_char*)vsdma_mapin(bp);
#endif
		xdebug(("addr %x, maplen %d, dvma %x, bcount %d, dir %s\n",
		       dh->dh_addr, dh->dh_maplen, dh->dh_dvma, bp->b_bcount,
		       (dh->dh_flags & SIDH_OUT ? "OUT" : "IN")));
	}
#endif
	/* success */
	sr->sr_dma_hand = dh;

	return;
}


void
si_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsi_xfer *xs = sr->sr_xs;
	struct buf *bp = sr->sr_xs->bp;
	struct si_dma_handle *dh = sr->sr_dma_hand;

	trace (("si_dma_free()\n"));

#ifdef	DIAGNOSTIC
	if (dh == NULL)
		panic("si_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("si_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
#if 0
		debug(("bp->b_flags=0x%x\n", bp->b_flags));
		if (bp->b_flags & B_PHYS) {
#ifdef USE_VMAPBUF
			printf("not unmapping(%x/%x %x/%x %d/%d)...\n",
			       dh->dh_addr, dh->dh_dvma,
			       bp->b_saveaddr, bp->b_data,
			       bp->b_bcount, dh->dh_maplen);
			/* vunmapbuf(bp, dh->dh_maplen); */
			printf("done.\n");
#endif
			dh->dh_dvma = 0;
		}
#endif
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}


/*
 * REGBUSY and DMABUSY won't collide since the higher-level driver
 * issues intr_on/intr_off before/after doing DMA.  The only problem
 * is to handle RDBUF/WRBUF with respect to REGBUSY/DMABUSY.
 *
 * There might be race conditions, but for now we don't worry about them...
 */
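/*
 * Typical locking sequence for a write, as implemented below
 * (reads mirror it with RDBUF and the copy done in si_intr()):
 *
 *	si_intr_off()		lock bus as REGBUSY
 *	si_dma_start()		toggle REGBUSY -> WRBUF, bcopy() into the
 *				bounce buffer, toggle WRBUF -> REGBUSY,
 *				then toggle REGBUSY -> DMABUSY and start DMA
 *	si_intr()		toggle DMABUSY -> RDBUF for the bcopy() out
 *				(read case), toggle back, release DMABUSY
 *	si_intr_on()		release REGBUSY, unlocking the bus
 */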
int
si_dmaLockBus(ncr_sc, lt)
	struct ncr5380_softc *ncr_sc;
	int lt;			/* Lock-Type */
{
	struct si_softc *sc = (void*)ncr_sc;
	int timeout = 200;	/* wait .2 seconds max. */

	trace(("si_dmaLockBus(%x), cold: %d, current: %x\n",
	       lt, cold, sc->sc_dflags));

#ifdef POLL_MODE
	if (cold)
		return (0);
#endif

	if ((ncr_sc->sc_current != NULL) && (lt == VSDMA_REGBUSY)) {
		printf("trying to use regs while sc_current is set.\n");
		printf("lt=%x, fl=%x, cur=%x\n",
		       lt, sc->sc_dflags, ncr_sc->sc_current);
	}
	if ((ncr_sc->sc_current == NULL) && (lt != VSDMA_REGBUSY)) {
		printf("trying to use/prepare DMA without current.\n");
		printf("lt=%x, fl=%x, cur=%x\n",
		       lt, sc->sc_dflags, ncr_sc->sc_current);
	}

	if ((sc->sc_dflags & VSDMA_LOCKED) == 0) {
		struct si_softc *sc = (struct si_softc *)ncr_sc;
		sc->sc_dflags |= VSDMA_WANTED;
		vsbus_lockDMA(sc->sc_cfargs);
		sc->sc_dflags = VSDMA_LOCKED | lt;
		return (0);
	}

#if 1
	while ((sc->sc_dflags & VSDMA_LCKTYPE) != lt) {
		debug(("busy wait(1)...\n"));
		if (--timeout == 0) {
			printf("timeout in busy-wait(%x %x)\n",
			       lt, sc->sc_dflags);
			sc->sc_dflags &= ~VSDMA_LCKTYPE;
			break;
		}
		delay(1000);
	}
	debug(("busy wait(1) done.\n"));
	sc->sc_dflags |= lt;

#else
	if ((sc->sc_dflags & VSDMA_LCKTYPE) != lt) {
		switch (lt) {

		case VSDMA_RDBUF:
			/* sc->sc_dflags |= VSDMA_IWANTED; */
			debug(("busy wait(1)...\n"));
			while (sc->sc_dflags &
			       (VSDMA_WRBUF | VSDMA_DMABUSY)) {
				if (--timeout == 0) {
					printf("timeout in busy-wait(1)\n");
					sc->sc_dflags &= ~VSDMA_WRBUF;
					sc->sc_dflags &= ~VSDMA_DMABUSY;
				}
				delay(1000);
			}
			/* sc->sc_dflags &= ~VSDMA_IWANTED; */
			debug(("busy wait(1) done.\n"));
			sc->sc_dflags |= lt;
			break;

		case VSDMA_WRBUF:
			/* sc->sc_dflags |= VSDMA_IWANTED; */
			debug(("busy wait(2)...\n"));
			while (sc->sc_dflags &
			       (VSDMA_RDBUF | VSDMA_DMABUSY)) {
				if (--timeout == 0) {
					printf("timeout in busy-wait(2)\n");
					sc->sc_dflags &= ~VSDMA_RDBUF;
					sc->sc_dflags &= ~VSDMA_DMABUSY;
				}
				delay(1000);
			}
			/* sc->sc_dflags &= ~VSDMA_IWANTED; */
			debug(("busy wait(2) done.\n"));
			sc->sc_dflags |= lt;
			break;

		case VSDMA_DMABUSY:
			/* sc->sc_dflags |= VSDMA_IWANTED; */
			debug(("busy wait(3)...\n"));
			while (sc->sc_dflags &
			       (VSDMA_RDBUF | VSDMA_WRBUF)) {
				if (--timeout == 0) {
					printf("timeout in busy-wait(3)\n");
					sc->sc_dflags &= ~VSDMA_RDBUF;
					sc->sc_dflags &= ~VSDMA_WRBUF;
				}
				delay(1000);
			}
			/* sc->sc_dflags &= ~VSDMA_IWANTED; */
			debug(("busy wait(3) done.\n"));
			sc->sc_dflags |= lt;
			break;

		case VSDMA_REGBUSY:
			/* sc->sc_dflags |= VSDMA_IWANTED; */
			debug(("busy wait(4)...\n"));
			while (sc->sc_dflags &
			       (VSDMA_RDBUF | VSDMA_WRBUF | VSDMA_DMABUSY)) {
				if (--timeout == 0) {
					printf("timeout in busy-wait(4)\n");
					sc->sc_dflags &= ~VSDMA_RDBUF;
					sc->sc_dflags &= ~VSDMA_WRBUF;
					sc->sc_dflags &= ~VSDMA_DMABUSY;
				}
				delay(1000);
			}
			/* sc->sc_dflags &= ~VSDMA_IWANTED; */
			debug(("busy wait(4) done.\n"));
			sc->sc_dflags |= lt;
			break;

		default:
			printf("illegal lockType %x in si_dmaLockBus()\n", lt);
		}
	}
	else
		printf("already locked. (%x/%x)\n", lt, sc->sc_dflags);
#endif
	if (sc->sc_dflags & lt) /* successfully locked for this type */
		return (0);

	printf("spurious %x in si_dmaLockBus(%x)\n", lt, sc->sc_dflags);
	return (-1);
}

/*
 * The lock of this type is no longer needed.  If all (internal) locks
 * are released, release the DMA bus.
 */
int
si_dmaReleaseBus(ncr_sc, lt)
	struct ncr5380_softc *ncr_sc;
	int lt;			/* Lock-Type */
{
	struct si_softc *sc = (void*)ncr_sc;

	trace(("si_dmaReleaseBus(%x), cold: %d, current: %x\n",
	       lt, cold, sc->sc_dflags));

#ifdef POLL_MODE
	if (cold)
		return (0);
#endif

	if ((sc->sc_dflags & VSDMA_LCKTYPE) == lt) {
		sc->sc_dflags &= ~lt;
	}
	else
		printf("trying to release %x while flags = %x\n", lt,
		       sc->sc_dflags);

	if (sc->sc_dflags == VSDMA_LOCKED) {	/* no longer needed */
		struct si_softc *sc = (struct si_softc *)ncr_sc;
		vsbus_unlockDMA(sc->sc_cfargs);
		sc->sc_dflags = 0;
		return (0);
	}
	return (0);
}

/*
 * Just toggle the type of lock without releasing the lock...
 * This is usually needed before/after a bcopy() to/from the DMA buffer.
 */
int
si_dmaToggleLock(ncr_sc, lt1, lt2)
	struct ncr5380_softc *ncr_sc;
	int lt1, lt2;		/* Lock-Type */
{
	struct si_softc *sc = (void*)ncr_sc;

#ifdef POLL_MODE
	if (cold)
		return (0);
#endif

	if (((sc->sc_dflags & lt1) != 0) &&
	    ((sc->sc_dflags & lt2) == 0)) {
		sc->sc_dflags |= lt2;
		sc->sc_dflags &= ~lt1;
		return (0);
	}
	printf("cannot toggle locking from %x to %x (current = %x)\n",
	       lt1, lt2, sc->sc_dflags);
	return (-1);
}

/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
si_intr_on(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	si_dmaReleaseBus(ncr_sc, VSDMA_REGBUSY);
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 *
 * VS2000 note: we have four kinds of access which are mutually exclusive:
 * - access to the NCR5380 registers
 * - access to the HDC9224 registers
 * - access to the DMA area
 * - doing DMA
 */
void
si_intr_off(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	si_dmaLockBus(ncr_sc, VSDMA_REGBUSY);
}

/*****************************************************************
 * VS2000 functions for DMA
 ****************************************************************/


/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to set up the DMA engine before the bus enters a DATA phase.
 *
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 *
 * On the VME version we just clear the DMA count and address
 * here (to make sure it stays idle) and do the real setup
 * later, in dma_start.
 */
void
si_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	trace (("si_dma_setup(ncr_sc) !!!\n"));

	/*
	 * VS2000: nothing to do ...
	 */
}


void
si_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	volatile struct si_regs *si = sc->sc_regs;
	long data_pa;
	int xlen;

	trace(("si_dma_start(%x)\n", sr->sr_dma_hand));

	/*
	 * We always transfer from/to the base of the DMA area,
	 * thus the DMA address is always the same; only size
	 * and direction differ on the VS2000.
	 */

	debug(("ncr_sc->sc_datalen = %d\n", ncr_sc->sc_datalen));
	xlen = ncr_sc->sc_datalen;
	dh->dh_xlen = xlen;

	/*
	 * The VS2000 has a fixed 16KB area to which DMA is restricted.
	 * All DMA addresses are relative to this base: KA410_DMA_BASE.
	 * Thus we need to copy the data into this area when writing,
	 * or copy from this area when reading (a kind of bounce buffer).
	 */

	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		/*
		 * We know that we are called while intr_off (regs locked),
		 * thus we toggle the lock from REGBUSY to WRBUF.
		 * Also we set the BLOCKED flag, so that the locking of
		 * the DMA bus won't be released to the HDC9224...
		 */
		debug(("preparing data-out (bcopy)\n"));
		si_dmaToggleLock(ncr_sc, VSDMA_REGBUSY, VSDMA_WRBUF);
		bcopy(dh->dh_dvma, sc->sc_dbase, xlen);
		si_dmaToggleLock(ncr_sc, VSDMA_WRBUF, VSDMA_REGBUSY);
		*sc->sc_ddreg = DMA_DIR_OUT;
	}
	else {
		debug(("preparing data-in (bzero)\n"));
		/* bzero(sc->sc_dbase, xlen); */
		*sc->sc_ddreg = DMA_DIR_IN;
	}
	sc->sc_dflags |= VSDMA_BLOCKED;

	*sc->sc_dareg = DMA_ADDR_HBYTE; /* high byte (6 bits) */
	*sc->sc_dareg = DMA_ADDR_LBYTE; /* low byte */
	*sc->sc_dcreg = 0 - xlen; /* bertram XXX */
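	/*
	 * The count register is loaded with the negative (two's complement)
	 * transfer length and apparently counts up towards zero; that is
	 * why si_intr() and si_dma_stop() compute the number of bytes
	 * actually transferred as ntrans = dh_xlen + resid.
	 */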

#ifdef	DEBUG
	if (si_debug & 2) {
		printf("si_dma_start: dh=0x%x, xlen=%d, creg=0x%x\n",
			   dh, xlen, *sc->sc_dcreg);
	}
#endif

#ifdef POLL_MODE
	debug(("dma_start: cold=%d\n", cold));
	if (cold) {
		*sc->intmsk &= ~sc->intbit;
		*sc->intclr = sc->intbit;
	}
	else
		*sc->intmsk |= sc->intbit;
#endif
	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	si_dmaToggleLock(ncr_sc, VSDMA_REGBUSY, VSDMA_DMABUSY);
	if (dh->dh_flags & SIDH_OUT) {
		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_dma_send = 0;	/* start it */
	} else {
		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = 0;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_irecv = 0; /* start it */
	}
	ncr_sc->sc_state |= NCR_DOINGDMA;
	/*
	 * Having a delay (e.g. a printf) here seems to solve the problem.
	 * Isn't that strange ????
	 * Maybe the higher-level driver accesses one of the registers of
	 * the controller while DMA is in progress.  Having a long enough
	 * delay here might prevent/delay this access until the DMA bus is
	 * free again...
	 *
	 * The instruction ++++ printf("DMA started.\n"); ++++
	 * is long/slow enough to make the SCSI driver work.  Thus we
	 * try to find a delay() long/slow enough to do the same.  The
	 * argument to this delay is relative to the transfer count.
	 */
	delay(3*xlen/4);		/* XXX solve this problem!!! XXX */

#ifdef	DEBUG
	if (si_debug & 2) {
		printf("si_dma_start: started, flags=0x%x\n",
			   ncr_sc->sc_state);
	}
#endif
}


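/*
 * si_vme_dma_eop() is apparently a leftover from the Sun3/VME variant
 * of this driver; it is not hooked up here (si_attach() sets sc_dma_eop
 * to NULL, and the forward declaration above names si_dma_eop).
 */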
void
si_vme_dma_eop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	trace (("si_vme_dma_eop() !!!\n"));
	/* Not needed - DMA was stopped prior to examining sci_csr */
}

/*
 * si_dma_stop() has now become almost a no-op routine, since the DMA
 * buffer has already been copied out within si_intr(), so there is
 * usually little left to do here.
 */
void
si_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	volatile struct si_regs *si = sc->sc_regs;
	int resid, ntrans;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef	DEBUG
		printf("si_dma_stop: dma not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING) {
		printf("si_dma_stop: timeout?\n");
		goto out;
	}

	/*
	 * Now try to figure out how much was actually transferred.
	 */
	si_dmaLockBus(ncr_sc, VSDMA_DMABUSY);
	si_dmaToggleLock(ncr_sc, VSDMA_DMABUSY, VSDMA_REGBUSY);
	resid = *sc->sc_dcreg;
	/*
	 * XXX: don't correct at two places !!!
	 */
	if (resid == 1 && sc->sc_xflags) {
		resid = 0;
	}
	ntrans = dh->dh_xlen + resid;
	if (resid != 0)
		printf("resid=%d, xlen=%d, ntrans=%d\n",
		       resid, dh->dh_xlen, ntrans);

#ifdef	DEBUG
	if (si_debug & 2) {
		printf("si_dma_stop: resid=0x%x ntrans=0x%x\n",
		       resid, ntrans);
	}
#endif

	if (ntrans < MIN_DMA_LEN) {
		printf("si: fifo count: 0x%x\n", resid);
		ncr_sc->sc_state |= NCR_ABORTING;
		goto out;
	}
	if (ntrans > ncr_sc->sc_datalen)
		panic("si_dma_stop: excess transfer");

	/*
	 * On the VS2000, in case of a READ operation, we must now copy
	 * the buffer contents to the destination address!
	 */
	if ((dh->dh_flags & SIDH_OUT) == 0 &&
	    (dh->dh_flags & SIDH_DONE) == 0) {
		printf("DMA buffer not yet copied.\n");
		si_dmaToggleLock(ncr_sc, VSDMA_REGBUSY, VSDMA_RDBUF);
		bcopy(sc->sc_dbase, dh->dh_dvma, ntrans);
		si_dmaToggleLock(ncr_sc, VSDMA_RDBUF, VSDMA_REGBUSY);
	}
	si_dmaReleaseBus(ncr_sc, VSDMA_REGBUSY);

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

out:
	si_dmaLockBus(ncr_sc, VSDMA_DMABUSY);

	/* Put SBIC back in PIO mode. */
	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	*ncr_sc->sci_icmd = 0;

	si_dmaReleaseBus(ncr_sc, VSDMA_DMABUSY);
}

/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 */
void
si_dma_poll(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	int i, timeout;

	if (! cold)
		printf("spurious call of DMA-poll ???");

#ifdef POLL_MODE

	delay(10000);
	trace(("si_dma_poll(%x)\n", *sc->sc_dcreg));

	/*
	 * The interrupt request has been cleared by dma_start, thus
	 * we do nothing but wait for the intreq to reappear...
	 */

	timeout = 5000;
	for (i=0; i<timeout; i++) {
		if (*sc->intreq & sc->intbit)
			break;
		delay(100);
	}
	if ((*sc->intreq & sc->intbit) == 0) {
		printf("si: DMA timeout (while polling)\n");
		/* Indicate timeout as MI code would. */
		sr->sr_flags |= SR_OVERDUE;
	}
#endif
	return;
}
1288